From 46f96c557277bd8dbcf93b30f549d98b1e9f42e1 Mon Sep 17 00:00:00 2001
From: Rolf Neugebauer
Date: Fri, 10 Apr 2020 10:19:15 +0100
Subject: [PATCH 1/8] kernel: Remove 4.19.x-rt patches and configs

Signed-off-by: Rolf Neugebauer
---
 kernel/Makefile | 2 -
 kernel/config-4.19.x-aarch64 | 4550 -----
 kernel/config-4.19.x-aarch64-rt | 20 -
 kernel/config-4.19.x-x86_64-rt | 22 -
 ...M-at91-add-TCB-registers-definitions.patch | 209 -
 ...ers-Add-a-new-driver-for-the-Atmel-A.patch | 484 --
 ...ers-timer-atmel-tcb-add-clockevent-d.patch | 270 -
 ...drivers-atmel-pit-make-option-silent.patch | 35 -
 ...at91-Implement-clocksource-selection.patch | 54 -
 ...onfigs-at91-use-new-TCB-timer-driver.patch | 42 -
 .../0007-ARM-configs-at91-unselect-PIT.patch | 43 -
 ...ts-Move-pending-table-allocation-to-.patch | 170 -
 ...-convert-worker-lock-to-raw-spinlock.patch | 202 -
 ...m-qi-simplify-CGR-allocation-freeing.patch | 139 -
 ...obustify-CFS-bandwidth-timer-locking.patch | 147 -
 ...012-arm-Convert-arm-boot_lock-to-raw.patch | 431 --
 ...-let-setaffinity-unmask-threaded-EOI.patch | 100 -
 ...irqsave-in-cgroup_rstat_flush_locked.patch | 49 -
 ...lize-cookie-hash-table-raw-spinlocks.patch | 63 -
 ...mbus-include-header-for-get_irq_regs.patch | 39 -
 ...de-irqflags.h-for-raw_local_irq_save.patch | 32 -
 .../0018-efi-Allow-efi-runtime.patch | 31 -
 ...fi-drop-task_lock-from-efi_switch_mm.patch | 54 -
 ...e_layout-before-altenates-are-applie.patch | 82 -
 ...-phandle-cache-outside-of-the-devtre.patch | 102 -
 ...ake-quarantine_lock-a-raw_spinlock_t.patch | 97 -
 ...xpedited-GP-parallelization-cleverne.patch | 50 -
 ...-kmemleak_lock-to-raw-spinlock-on-RT.patch | 168 -
 ...-replace-seqcount_t-with-a-seqlock_t.patch | 135 -
 ...vide-a-pointer-to-the-valid-CPU-mask.patch | 784 ---
 ...ernel-sched-core-add-migrate_disable.patch | 265 -
 ...sable-Add-export_symbol_gpl-for-__mi.patch | 37 -
 ...o-not-disable-enable-clocks-in-a-row.patch | 97 -
 ...B-Allow-higher-clock-rates-for-clock.patch | 169 -
 ...31-timekeeping-Split-jiffies-seqlock.patch | 170 -
 ...2-signal-Revert-ptrace-preempt-magic.patch | 37 -
 ...et-sched-Use-msleep-instead-of-yield.patch | 63 -
 ...rq-remove-BUG_ON-irqs_disabled-check.patch | 36 -
 ...do-no-disable-interrupts-in-giveback.patch | 45 -
 ...rovide-PREEMPT_RT_BASE-config-switch.patch | 63 -
 ...sable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch | 75 -
 ...abel-disable-if-stop_machine-is-used.patch | 41 -
 ...config-options-which-are-not-RT-comp.patch | 42 -
 .../0040-lockdep-disable-self-test.patch | 34 -
 .../0041-mm-Allow-only-slub-on-RT.patch | 36 -
 ...locking-Disable-spin-on-owner-for-RT.patch | 37 -
 ...043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch | 29 -
 ...044-rcu-make-RCU_BOOST-default-on-RT.patch | 33 -
 ...-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch | 34 -
 ...46-net-core-disable-NET_RX_BUSY_POLL.patch | 37 -
 ...0047-arm-disable-NEON-in-kernel-mode.patch | 165 -
 ...0048-powerpc-Use-generic-rwsem-on-RT.patch | 32 -
 ...ble-in-kernel-MPIC-emulation-for-PRE.patch | 44 -
 .../0050-powerpc-Disable-highmem-on-RT.patch | 28 -
 .../0051-mips-Disable-highmem-on-RT.patch | 28 -
 ...86-Use-generic-rwsem_spinlocks-on-rt.patch | 33 -
 ...ds-trigger-disable-CPU-trigger-on-RT.patch | 40 -
 ...rop-K8-s-driver-from-beeing-selected.patch | 38 -
 .../0055-md-disable-bcache.patch | 40 -
 ...6-efi-Disable-runtime-services-on-RT.patch | 45 -
 ...0057-printk-Add-a-printk-kill-switch.patch | 173 -
 ..._early_printk-boot-param-to-help-wit.patch | 38 -
 ...pt-Provide-preempt_-_-no-rt-variants.patch | 52 -
 ...-migrate_disable-enable-in-different.patch | 69 -
.../0061-rt-Add-local-irq-locks.patch | 340 -- ...provide-get-put-_locked_ptr-variants.patch | 48 - ...catterlist-Do-not-disable-irqs-on-RT.patch | 29 - ...-x86-Delay-calling-signals-in-atomic.patch | 153 - ...ignal-delay-calling-signals-on-32bit.patch | 48 - ...head-Replace-bh_uptodate_lock-for-rt.patch | 196 - ...-state-lock-and-journal-head-lock-rt.patch | 109 - ...st_bl-Make-list-head-locking-RT-safe.patch | 120 - ...-list_bl-fixup-bogus-lockdep-warning.patch | 103 - .../0070-genirq-Disable-irqpoll-on-rt.patch | 42 - ...-genirq-Force-interrupt-thread-on-RT.patch | 53 - ...d-zone-lock-while-freeing-pages-from.patch | 172 - ...d-zone-lock-while-freeing-pages-from.patch | 171 - ...B-change-list_lock-to-raw_spinlock_t.patch | 618 --- ...ving-back-empty-slubs-to-IRQ-enabled.patch | 222 - ...page_alloc-rt-friendly-per-cpu-pages.patch | 238 - ...077-mm-swap-Convert-to-percpu-locked.patch | 210 - ...m-perform-lru_add_drain_all-remotely.patch | 108 - ...t-per-cpu-variables-with-preempt-dis.patch | 144 - ...plit-page-table-locks-for-vector-pag.patch | 75 - .../0081-mm-Enable-SLUB-for-RT.patch | 41 - ...0082-slub-Enable-irqs-for-__GFP_WAIT.patch | 47 - .../0083-slub-Disable-SLUB_CPU_PARTIAL.patch | 53 - ...n-t-call-schedule_work_on-in-preempt.patch | 74 - ...place-local_irq_disable-with-local-l.patch | 123 - ...oc-copy-with-get_cpu_var-and-locking.patch | 202 - ...le-preemption-__split_large_page-aft.patch | 61 - .../0088-radix-tree-use-local-locks.patch | 175 - ...9-timers-Prepare-for-full-preemption.patch | 175 - ...090-x86-kvm-Require-const-tsc-for-RT.patch | 36 - ...ec-Don-t-use-completion-s-wait-queue.patch | 114 - .../0092-wait.h-include-atomic.h.patch | 41 - ...mple-Simple-work-queue-implemenation.patch | 245 - ...-a-shit-statement-in-SWORK_EVENT_PEN.patch | 36 - ...95-completion-Use-simple-wait-queues.patch | 390 -- .../0096-fs-aio-simple-simple-work.patch | 88 - ...voke-the-affinity-callback-via-a-wor.patch | 141 - ...id-schedule_work-with-interrupts-dis.patch | 59 - ...ate-hrtimer_init-hrtimer_init_sleepe.patch | 289 -- ...100-hrtimers-Prepare-full-preemption.patch | 289 -- ...s-by-default-into-the-softirq-contex.patch | 236 - ...air-Make-the-hrtimers-non-hard-again.patch | 33 - ...-schedule_work-call-to-helper-thread.patch | 97 - ...te-change-before-hrtimer_cancel-in-d.patch | 50 - ...timers-Thread-posix-cpu-timers-on-rt.patch | 268 - ...ched-Move-task_struct-cleanup-to-RCU.patch | 95 - ...-number-of-task-migrations-per-batch.patch | 32 - .../0108-sched-Move-mmdrop-to-RCU-on-RT.patch | 139 - ...e-stack-kprobe-clean-up-to-__put_tas.patch | 69 - ...state-for-tasks-blocked-on-sleeping-.patch | 107 - ...ount-rcu_preempt_depth-on-RT-in-migh.patch | 56 - ...-proper-LOCK_OFFSET-for-cond_resched.patch | 32 - .../0113-sched-Disable-TTWU_QUEUE-on-RT.patch | 37 - ...Only-wake-up-idle-workers-if-not-blo.patch | 44 - ...ease-the-nr-of-migratory-tasks-when-.patch | 161 - ...-hotplug-Lightweight-get-online-cpus.patch | 100 - ...e-disabled-counter-to-tracing-output.patch | 85 - .../0118-lockdep-Make-it-RT-aware.patch | 77 - ...tasklets-from-going-into-infinite-sp.patch | 302 -- ...eemption-after-reenabling-interrupts.patch | 179 - ...oftirq-Disable-softirq-stacks-for-RT.patch | 176 - .../0122-softirq-Split-softirq-locks.patch | 831 --- ...-use-local_bh_disable-in-netif_rx_ni.patch | 40 - ...abling-of-softirq-processing-in-irq-.patch | 160 - ...plit-timer-softirqs-out-of-ksoftirqd.patch | 214 - ...cal_softirq_pending-messages-if-ksof.patch | 111 - ...cal_softirq_pending-messages-if-task.patch | 40 - 
.../0128-rtmutex-trylock-is-okay-on-RT.patch | 33 - ...-nfs-turn-rmdir_sem-into-a-semaphore.patch | 150 - ...he-various-new-futex-race-conditions.patch | 254 - ...on-when-a-requeued-RT-task-times-out.patch | 120 - ...k-unlock-symetry-versus-pi_lock-and-.patch | 49 - .../0133-pid.h-include-atomic.h.patch | 42 - ...arm-include-definition-for-cpumask_t.patch | 30 - ...ure-Do-NOT-include-rwlock.h-directly.patch | 33 - ...36-rtmutex-Add-rtmutex_lock_killable.patch | 59 - ...0137-rtmutex-Make-lock_killable-work.patch | 50 - ...spinlock-Split-the-lock-types-header.patch | 220 - .../0139-rtmutex-Avoid-include-hell.patch | 29 - ...-rbtree-don-t-include-the-rcu-header.patch | 174 - ...tex-Provide-rt_mutex_slowlock_locked.patch | 144 - ...ockdep-less-version-of-rt_mutex-s-lo.patch | 152 - ...tex-add-sleeping-lock-implementation.patch | 1207 ----- ...utex-implementation-based-on-rtmutex.patch | 382 -- ...wsem-implementation-based-on-rtmutex.patch | 426 -- ...lock-implementation-based-on-rtmutex.patch | 581 --- ...-preserve-state-like-a-sleeping-lock.patch | 32 - .../0148-rtmutex-wire-up-RT-s-locking.patch | 270 - ...utex-add-ww_mutex-addon-for-mutex-rt.patch | 444 -- .../0150-kconfig-Add-PREEMPT_RT_FULL.patch | 68 - ...-fix-deadlock-in-device-mapper-block.patch | 80 - ...utex-Flush-block-plug-on-__down_read.patch | 45 - ...re-init-the-wait_lock-in-rt_mutex_in.patch | 38 - ...ace-fix-ptrace-vs-tasklist_lock-race.patch | 165 - ...mutex-annotate-sleeping-lock-context.patch | 307 -- ...sable-fallback-to-preempt_disable-in.patch | 203 - ...eck-for-__LINUX_SPINLOCK_TYPES_H-on-.patch | 181 - .../0158-rcu-Frob-softirq-test.patch | 172 - ...59-rcu-Merge-RCU-bh-into-RCU-preempt.patch | 350 -- ...ke-ksoftirqd-do-RCU-quiescent-states.patch | 116 - ...nate-softirq-processing-from-rcutree.patch | 422 -- ...-use-cpu_online-instead-custom-check.patch | 95 - ...place-local_irqsave-with-a-locallock.patch | 76 - ..._normal_after_boot-by-default-for-RT.patch | 36 - ...erial-omap-Make-the-locking-RT-aware.patch | 48 - ...al-pl011-Make-the-locking-work-on-RT.patch | 53 - ...-explicitly-initialize-the-flags-var.patch | 44 - ...mprove-the-serial-console-PASS_LIMIT.patch | 46 - ...0-don-t-take-the-trylock-during-oops.patch | 34 - ...wsem-Remove-preempt_disable-variants.patch | 225 - ...ate_mm-by-preempt_-disable-enable-_r.patch | 80 - ...back-explicit-INIT_HLIST_BL_HEAD-ini.patch | 60 - ...e-preemption-on-i_dir_seq-s-write-si.patch | 98 - ...e-of-local-lock-in-multi_cpu-decompr.patch | 71 - ...rmal-Defer-thermal-wakups-to-threads.patch | 136 - ...e-preemption-around-local_bh_disable.patch | 37 - ...poll-Do-not-disable-preemption-on-RT.patch | 36 - ...er-preempt-disable-region-which-suck.patch | 72 - .../0179-block-mq-use-cpu_light.patch | 35 - ...ock-mq-do-not-invoke-preempt_disable.patch | 57 - ...k-mq-don-t-complete-requests-via-IPI.patch | 118 - ...-Make-raid5_percpu-handling-RT-aware.patch | 76 - .../0183-rt-Introduce-cpu_chill.patch | 112 - ...rtimer-Don-t-lose-state-in-cpu_chill.patch | 47 - ...chill-save-task-state-in-saved_state.patch | 62 - ...e-blk_queue_usage_counter_release-in.patch | 119 - ...-block-Use-cpu_chill-for-retry-loops.patch | 50 - ...cache-Use-cpu_chill-in-trylock-loops.patch | 65 - ...t-Use-cpu_chill-instead-of-cpu_relax.patch | 69 - ...use-swait_queue-instead-of-waitqueue.patch | 240 - .../0191-workqueue-Use-normal-rcu.patch | 359 -- ...cal-irq-lock-instead-of-irq-disable-.patch | 185 - ...t-workqueue-versus-ata-piix-livelock.patch | 138 - ...tangle-worker-accounting-from-rqlock.patch | 290 -- 
.../0195-debugobjects-Make-RT-aware.patch | 31 - .../0196-seqlock-Prevent-rt-starvation.patch | 194 - ...vc_xprt_do_enqueue-use-get_cpu_light.patch | 60 - ...0198-net-Use-skbufhead-with-raw-lock.patch | 172 - ...recursion-to-per-task-variable-on-RT.patch | 277 - ...y-to-delegate-processing-a-softirq-t.patch | 88 - ...ake-qdisc-s-busylock-in-__dev_xmit_s.patch | 41 - ...Qdisc-use-a-seqlock-instead-seqcount.patch | 292 -- ...-missing-serialization-in-ip_send_un.patch | 98 - .../0204-net-add-a-lock-around-icmp_sk.patch | 64 - ...schedule_irqoff-disable-interrupts-o.patch | 76 - ...-push-most-work-into-softirq-context.patch | 263 - .../0207-printk-Make-rt-aware.patch | 113 - ...n-t-try-to-print-from-IRQ-NMI-region.patch | 47 - ...intk-Drop-the-logbuf_lock-more-often.patch | 82 - ...n-translation-section-permission-fau.patch | 95 - ...-irq_set_irqchip_state-documentation.patch | 31 - ...wngrade-preempt_disable-d-region-to-.patch | 58 - ...-preemp_disable-in-addition-to-local.patch | 170 - ...14-kgdb-serial-Short-term-workaround.patch | 85 - ...-sysfs-Add-sys-kernel-realtime-entry.patch | 53 - .../0216-mm-rt-kmap_atomic-scheduling.patch | 324 -- ...highmem-Add-a-already-used-pte-check.patch | 28 - .../0218-arm-highmem-Flush-tlb-on-unmap.patch | 33 - .../0219-arm-Enable-highmem-for-rt.patch | 183 - .../0220-scsi-fcoe-Make-RT-aware.patch | 115 - ...ypto-Reduce-preempt-disabled-regions.patch | 117 - ...-preempt-disabled-regions-more-algos.patch | 262 - ...ypto-limit-more-FPU-enabled-sections.patch | 107 - ...-serialize-RT-percpu-scratch-buffer-.patch | 82 - ...d-a-lock-instead-preempt_disable-loc.patch | 84 - ...andom_bytes-for-RT_FULL-in-init_oops.patch | 33 - ...ackprotector-Avoid-random-pool-on-rt.patch | 50 - .../0228-random-Make-it-work-on-rt.patch | 166 - ...29-cpu-hotplug-Implement-CPU-pinning.patch | 116 - ...ed-user-tasks-to-be-awakened-to-the-.patch | 38 - ...duct-tape-RT-rwlock-usage-for-non-RT.patch | 101 - ...ove-preemption-disabling-in-netif_rx.patch | 70 - ...r-local_irq_disable-kmalloc-headache.patch | 64 - ...-users-of-napi_alloc_cache-against-r.patch | 118 - ...rialize-xt_write_recseq-sections-on-.patch | 83 - ...Add-a-mutex-around-devnet_rename_seq.patch | 112 - ...-Only-do-hardirq-context-test-for-ra.patch | 61 - ...-fix-warnings-due-to-missing-PREEMPT.patch | 148 - ...ched-Add-support-for-lazy-preemption.patch | 670 --- ...40-ftrace-Fix-trace-header-alignment.patch | 51 - ...0241-x86-Support-for-lazy-preemption.patch | 239 - ...-properly-check-against-preempt-mask.patch | 32 - ...-use-proper-return-label-on-32bit-x8.patch | 44 - ...-arm-Add-support-for-lazy-preemption.patch | 167 - ...erpc-Add-support-for-lazy-preemption.patch | 195 - ...-arch-arm64-Add-lazy-preempt-support.patch | 132 - ...c-Protect-send_msg-with-a-local-lock.patch | 73 - ...am-Replace-bit-spinlocks-with-rtmute.patch | 103 - ...-t-disable-preemption-in-zcomp_strea.patch | 106 - ...-zcomp_stream_get-smp_processor_id-u.patch | 43 - ...51-tpm_tis-fix-stall-after-iowrite-s.patch | 83 - ...t-deferral-of-watchdogd-wakeup-on-RT.patch | 80 - ...Use-preempt_disable-enable_rt-where-.patch | 60 - ...al_lock-unlock_irq-in-intel_pipe_upd.patch | 127 - .../0255-drm-i915-disable-tracing-on-RT.patch | 46 - ...M_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch | 32 - ...roups-use-simple-wait-in-css_release.patch | 94 - ...vert-callback_lock-to-raw_spinlock_t.patch | 292 -- ...-a-locallock-instead-preempt_disable.patch | 84 - ...rkqueue-Prevent-deadlock-stall-on-RT.patch | 206 - ...t-tasks-to-cache-one-sigqueue-struct.patch | 212 - 
...0262-Add-localversion-for-RT-release.patch | 21 - ...iommu-Use-a-locallock-instead-local_.patch | 96 - .../0264-powerpc-reshuffle-TIF-bits.patch | 151 - ...-Convert-show_lock-to-raw_spinlock_t.patch | 62 - ...isable-interrupts-independently-of-t.patch | 50 - ...-Fix-a-lockup-in-wait_for_completion.patch | 68 - ...8-kthread-add-a-global-worker-thread.patch | 179 - ...voke-the-affinity-callback-via-a-wor.patch | 100 - ...ssing-work_struct-in-irq_set_affinit.patch | 41 - ...-arm-imx6-cpuidle-Use-raw_spinlock_t.patch | 51 - ...to-change-rcu_normal_after_boot-on-R.patch | 35 - ...chtec-fix-stream_open.cocci-warnings.patch | 39 - ...-Drop-a-preempt_disable_rt-statement.patch | 49 - ...notification-of-canceling-timers-on-.patch | 649 --- ...ure-lock-unlock-symetry-versus-pi_lo.patch | 34 - ...-bug-on-when-a-requeued-RT-task-time.patch | 82 - ...andle-the-various-new-futex-race-con.patch | 257 - ...karound-migrate_disable-enable-in-di.patch | 69 - ...-Make-the-futex_hash_bucket-lock-raw.patch | 348 -- ...futex-Delay-deallocation-of-pi_state.patch | 182 - ...disable-preemption-in-zswap_frontswa.patch | 126 - .../patches-4.19.x-rt/0283-revert-aio.patch | 70 - .../0284-fs-aio-simple-simple-work.patch | 75 - .../0285-revert-thermal.patch | 119 - ...rmal-Defer-thermal-wakups-to-threads.patch | 97 - .../patches-4.19.x-rt/0287-revert-block.patch | 82 - ...e-blk_queue_usage_counter_release-in.patch | 113 - .../0289-workqueue-rework.patch | 1568 ------ ...0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch | 47 - ...0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch | 41 - ...nsure-inactive_timer-runs-in-hardirq.patch | 49 - ...temp-make-pkg_temp_lock-a-raw-spinlo.patch | 119 - ...lock_t-instread-disabling-preemption.patch | 295 -- ...t-the-timer-expire-in-hardirq-contex.patch | 53 - ...k-preemption-level-before-looking-at.patch | 33 - ..._ONCE-to-access-timer-base-in-hrimer.patch | 41 - ...ab-the-expiry-lock-for-non-soft-hrti.patch | 40 - ...using-hrtimer_grab_expiry_lock-on-mi.patch | 42 - ...ssing-bracket-and-hide-migration_bas.patch | 74 - ...lock-expiry-lock-in-the-early-return.patch | 40 - ...s-enable-Use-sleeping_lock-to-annota.patch | 57 - ..._allowed_ptr-Check-cpus_mask-not-cpu.patch | 38 - ...Remove-dead-__migrate_disabled-check.patch | 38 - ...e-disable-Protect-cpus_ptr-with-lock.patch | 46 - ...processor_id-Don-t-use-cpumask_equal.patch | 44 - ...utex_hash_bucket-spinlock_t-again-an.patch | 737 --- ...Clean-pi_blocked_on-in-the-error-cas.patch | 101 - ...ib-ubsan-Don-t-seralize-UBSAN-report.patch | 307 -- ...the-lock-of-kmemleak_object-to-raw_s.patch | 292 -- ...igrate_enable-Use-select_fallback_rq.patch | 65 - ...ched-Lazy-migrate_disable-processing.patch | 615 --- ...grate_enable-Use-stop_one_cpu_nowait.patch | 120 - ...alize-split-page-table-locks-for-vec.patch | 86 - ...nlock_t-and-rwlock_t-a-RCU-section-o.patch | 128 - ...te_enable-must-access-takedown_cpu_t.patch | 54 - ...r_id-Adjust-check_preemption_disable.patch | 40 - ...able-Busy-loop-until-the-migration-r.patch | 56 - ...fd-Use-a-seqlock-instead-of-seqcount.patch | 79 - ...ate_enable-Use-per-cpu-cpu_stop_work.patch | 82 - ...igrate_enable-Remove-__schedule-call.patch | 40 - ...ve-misplaced-local_unlock_irqrestore.patch | 41 - ...Include-header-for-the-current-macro.patch | 31 - ...preempt_disable-in-vmw_fifo_ping_hos.patch | 44 - ...empt_lazy-and-migrate_disable-counte.patch | 55 - ...-flags-parameter-from-calls-to-ubsan.patch | 56 - ...cking-of-IRQ_WORK_LAZY-flag-set-on-n.patch | 71 - .../0328-Linux-4.19.106-rt46-REBASE.patch | 19 - 332 files 
changed, 47842 deletions(-) delete mode 100644 kernel/config-4.19.x-aarch64 delete mode 100644 kernel/config-4.19.x-aarch64-rt delete mode 100644 kernel/config-4.19.x-x86_64-rt delete mode 100644 kernel/patches-4.19.x-rt/0001-ARM-at91-add-TCB-registers-definitions.patch delete mode 100644 kernel/patches-4.19.x-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch delete mode 100644 kernel/patches-4.19.x-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch delete mode 100644 kernel/patches-4.19.x-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch delete mode 100644 kernel/patches-4.19.x-rt/0005-ARM-at91-Implement-clocksource-selection.patch delete mode 100644 kernel/patches-4.19.x-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch delete mode 100644 kernel/patches-4.19.x-rt/0007-ARM-configs-at91-unselect-PIT.patch delete mode 100644 kernel/patches-4.19.x-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch delete mode 100644 kernel/patches-4.19.x-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch delete mode 100644 kernel/patches-4.19.x-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch delete mode 100644 kernel/patches-4.19.x-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch delete mode 100644 kernel/patches-4.19.x-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch delete mode 100644 kernel/patches-4.19.x-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch delete mode 100644 kernel/patches-4.19.x-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch delete mode 100644 kernel/patches-4.19.x-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch delete mode 100644 kernel/patches-4.19.x-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch delete mode 100644 kernel/patches-4.19.x-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch delete mode 100644 kernel/patches-4.19.x-rt/0018-efi-Allow-efi-runtime.patch delete mode 100644 kernel/patches-4.19.x-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch delete mode 100644 kernel/patches-4.19.x-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch delete mode 100644 kernel/patches-4.19.x-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch delete mode 100644 kernel/patches-4.19.x-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch delete mode 100644 kernel/patches-4.19.x-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch delete mode 100644 kernel/patches-4.19.x-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch delete mode 100644 kernel/patches-4.19.x-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch delete mode 100644 kernel/patches-4.19.x-rt/0027-kernel-sched-core-add-migrate_disable.patch delete mode 100644 kernel/patches-4.19.x-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch delete mode 100644 kernel/patches-4.19.x-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch delete mode 100644 kernel/patches-4.19.x-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch delete mode 100644 kernel/patches-4.19.x-rt/0031-timekeeping-Split-jiffies-seqlock.patch delete mode 100644 kernel/patches-4.19.x-rt/0032-signal-Revert-ptrace-preempt-magic.patch delete mode 100644 kernel/patches-4.19.x-rt/0033-net-sched-Use-msleep-instead-of-yield.patch delete mode 100644 
kernel/patches-4.19.x-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch delete mode 100644 kernel/patches-4.19.x-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch delete mode 100644 kernel/patches-4.19.x-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch delete mode 100644 kernel/patches-4.19.x-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0038-jump-label-disable-if-stop_machine-is-used.patch delete mode 100644 kernel/patches-4.19.x-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch delete mode 100644 kernel/patches-4.19.x-rt/0040-lockdep-disable-self-test.patch delete mode 100644 kernel/patches-4.19.x-rt/0041-mm-Allow-only-slub-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0042-locking-Disable-spin-on-owner-for-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch delete mode 100644 kernel/patches-4.19.x-rt/0047-arm-disable-NEON-in-kernel-mode.patch delete mode 100644 kernel/patches-4.19.x-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch delete mode 100644 kernel/patches-4.19.x-rt/0050-powerpc-Disable-highmem-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0051-mips-Disable-highmem-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch delete mode 100644 kernel/patches-4.19.x-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch delete mode 100644 kernel/patches-4.19.x-rt/0055-md-disable-bcache.patch delete mode 100644 kernel/patches-4.19.x-rt/0056-efi-Disable-runtime-services-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0057-printk-Add-a-printk-kill-switch.patch delete mode 100644 kernel/patches-4.19.x-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch delete mode 100644 kernel/patches-4.19.x-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch delete mode 100644 kernel/patches-4.19.x-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch delete mode 100644 kernel/patches-4.19.x-rt/0061-rt-Add-local-irq-locks.patch delete mode 100644 kernel/patches-4.19.x-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch delete mode 100644 kernel/patches-4.19.x-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch delete mode 100644 kernel/patches-4.19.x-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch delete mode 100644 kernel/patches-4.19.x-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch delete mode 100644 kernel/patches-4.19.x-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch delete mode 100644 kernel/patches-4.19.x-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch delete mode 100644 kernel/patches-4.19.x-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch delete mode 100644 kernel/patches-4.19.x-rt/0070-genirq-Disable-irqpoll-on-rt.patch delete mode 100644 kernel/patches-4.19.x-rt/0071-genirq-Force-interrupt-thread-on-RT.patch delete mode 100644 
kernel/patches-4.19.x-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch delete mode 100644 kernel/patches-4.19.x-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch delete mode 100644 kernel/patches-4.19.x-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch delete mode 100644 kernel/patches-4.19.x-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch delete mode 100644 kernel/patches-4.19.x-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch delete mode 100644 kernel/patches-4.19.x-rt/0077-mm-swap-Convert-to-percpu-locked.patch delete mode 100644 kernel/patches-4.19.x-rt/0078-mm-perform-lru_add_drain_all-remotely.patch delete mode 100644 kernel/patches-4.19.x-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch delete mode 100644 kernel/patches-4.19.x-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch delete mode 100644 kernel/patches-4.19.x-rt/0081-mm-Enable-SLUB-for-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch delete mode 100644 kernel/patches-4.19.x-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch delete mode 100644 kernel/patches-4.19.x-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch delete mode 100644 kernel/patches-4.19.x-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch delete mode 100644 kernel/patches-4.19.x-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch delete mode 100644 kernel/patches-4.19.x-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch delete mode 100644 kernel/patches-4.19.x-rt/0088-radix-tree-use-local-locks.patch delete mode 100644 kernel/patches-4.19.x-rt/0089-timers-Prepare-for-full-preemption.patch delete mode 100644 kernel/patches-4.19.x-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch delete mode 100644 kernel/patches-4.19.x-rt/0092-wait.h-include-atomic.h.patch delete mode 100644 kernel/patches-4.19.x-rt/0093-work-simple-Simple-work-queue-implemenation.patch delete mode 100644 kernel/patches-4.19.x-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch delete mode 100644 kernel/patches-4.19.x-rt/0095-completion-Use-simple-wait-queues.patch delete mode 100644 kernel/patches-4.19.x-rt/0096-fs-aio-simple-simple-work.patch delete mode 100644 kernel/patches-4.19.x-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch delete mode 100644 kernel/patches-4.19.x-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch delete mode 100644 kernel/patches-4.19.x-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch delete mode 100644 kernel/patches-4.19.x-rt/0100-hrtimers-Prepare-full-preemption.patch delete mode 100644 kernel/patches-4.19.x-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch delete mode 100644 kernel/patches-4.19.x-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch delete mode 100644 kernel/patches-4.19.x-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch delete mode 100644 kernel/patches-4.19.x-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch delete mode 100644 kernel/patches-4.19.x-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch delete mode 100644 kernel/patches-4.19.x-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch delete mode 100644 kernel/patches-4.19.x-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch delete mode 100644 
kernel/patches-4.19.x-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch delete mode 100644 kernel/patches-4.19.x-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch delete mode 100644 kernel/patches-4.19.x-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch delete mode 100644 kernel/patches-4.19.x-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch delete mode 100644 kernel/patches-4.19.x-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch delete mode 100644 kernel/patches-4.19.x-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch delete mode 100644 kernel/patches-4.19.x-rt/0116-hotplug-Lightweight-get-online-cpus.patch delete mode 100644 kernel/patches-4.19.x-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch delete mode 100644 kernel/patches-4.19.x-rt/0118-lockdep-Make-it-RT-aware.patch delete mode 100644 kernel/patches-4.19.x-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch delete mode 100644 kernel/patches-4.19.x-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch delete mode 100644 kernel/patches-4.19.x-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0122-softirq-Split-softirq-locks.patch delete mode 100644 kernel/patches-4.19.x-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch delete mode 100644 kernel/patches-4.19.x-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch delete mode 100644 kernel/patches-4.19.x-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch delete mode 100644 kernel/patches-4.19.x-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch delete mode 100644 kernel/patches-4.19.x-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch delete mode 100644 kernel/patches-4.19.x-rt/0128-rtmutex-trylock-is-okay-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch delete mode 100644 kernel/patches-4.19.x-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch delete mode 100644 kernel/patches-4.19.x-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch delete mode 100644 kernel/patches-4.19.x-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch delete mode 100644 kernel/patches-4.19.x-rt/0133-pid.h-include-atomic.h.patch delete mode 100644 kernel/patches-4.19.x-rt/0134-arm-include-definition-for-cpumask_t.patch delete mode 100644 kernel/patches-4.19.x-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch delete mode 100644 kernel/patches-4.19.x-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch delete mode 100644 kernel/patches-4.19.x-rt/0137-rtmutex-Make-lock_killable-work.patch delete mode 100644 kernel/patches-4.19.x-rt/0138-spinlock-Split-the-lock-types-header.patch delete mode 100644 kernel/patches-4.19.x-rt/0139-rtmutex-Avoid-include-hell.patch delete mode 100644 kernel/patches-4.19.x-rt/0140-rbtree-don-t-include-the-rcu-header.patch delete mode 100644 kernel/patches-4.19.x-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch delete mode 100644 kernel/patches-4.19.x-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch delete mode 100644 kernel/patches-4.19.x-rt/0143-rtmutex-add-sleeping-lock-implementation.patch delete mode 100644 
kernel/patches-4.19.x-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch delete mode 100644 kernel/patches-4.19.x-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch delete mode 100644 kernel/patches-4.19.x-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch delete mode 100644 kernel/patches-4.19.x-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch delete mode 100644 kernel/patches-4.19.x-rt/0148-rtmutex-wire-up-RT-s-locking.patch delete mode 100644 kernel/patches-4.19.x-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch delete mode 100644 kernel/patches-4.19.x-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch delete mode 100644 kernel/patches-4.19.x-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch delete mode 100644 kernel/patches-4.19.x-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch delete mode 100644 kernel/patches-4.19.x-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch delete mode 100644 kernel/patches-4.19.x-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch delete mode 100644 kernel/patches-4.19.x-rt/0155-rtmutex-annotate-sleeping-lock-context.patch delete mode 100644 kernel/patches-4.19.x-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch delete mode 100644 kernel/patches-4.19.x-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch delete mode 100644 kernel/patches-4.19.x-rt/0158-rcu-Frob-softirq-test.patch delete mode 100644 kernel/patches-4.19.x-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch delete mode 100644 kernel/patches-4.19.x-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch delete mode 100644 kernel/patches-4.19.x-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch delete mode 100644 kernel/patches-4.19.x-rt/0162-srcu-use-cpu_online-instead-custom-check.patch delete mode 100644 kernel/patches-4.19.x-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch delete mode 100644 kernel/patches-4.19.x-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch delete mode 100644 kernel/patches-4.19.x-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch delete mode 100644 kernel/patches-4.19.x-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch delete mode 100644 kernel/patches-4.19.x-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch delete mode 100644 kernel/patches-4.19.x-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch delete mode 100644 kernel/patches-4.19.x-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch delete mode 100644 kernel/patches-4.19.x-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch delete mode 100644 kernel/patches-4.19.x-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch delete mode 100644 kernel/patches-4.19.x-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch delete mode 100644 kernel/patches-4.19.x-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch delete mode 100644 kernel/patches-4.19.x-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch delete mode 100644 kernel/patches-4.19.x-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch delete mode 100644 
kernel/patches-4.19.x-rt/0179-block-mq-use-cpu_light.patch delete mode 100644 kernel/patches-4.19.x-rt/0180-block-mq-do-not-invoke-preempt_disable.patch delete mode 100644 kernel/patches-4.19.x-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch delete mode 100644 kernel/patches-4.19.x-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch delete mode 100644 kernel/patches-4.19.x-rt/0183-rt-Introduce-cpu_chill.patch delete mode 100644 kernel/patches-4.19.x-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch delete mode 100644 kernel/patches-4.19.x-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch delete mode 100644 kernel/patches-4.19.x-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch delete mode 100644 kernel/patches-4.19.x-rt/0187-block-Use-cpu_chill-for-retry-loops.patch delete mode 100644 kernel/patches-4.19.x-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch delete mode 100644 kernel/patches-4.19.x-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch delete mode 100644 kernel/patches-4.19.x-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch delete mode 100644 kernel/patches-4.19.x-rt/0191-workqueue-Use-normal-rcu.patch delete mode 100644 kernel/patches-4.19.x-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch delete mode 100644 kernel/patches-4.19.x-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch delete mode 100644 kernel/patches-4.19.x-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch delete mode 100644 kernel/patches-4.19.x-rt/0195-debugobjects-Make-RT-aware.patch delete mode 100644 kernel/patches-4.19.x-rt/0196-seqlock-Prevent-rt-starvation.patch delete mode 100644 kernel/patches-4.19.x-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch delete mode 100644 kernel/patches-4.19.x-rt/0198-net-Use-skbufhead-with-raw-lock.patch delete mode 100644 kernel/patches-4.19.x-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch delete mode 100644 kernel/patches-4.19.x-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch delete mode 100644 kernel/patches-4.19.x-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch delete mode 100644 kernel/patches-4.19.x-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch delete mode 100644 kernel/patches-4.19.x-rt/0204-net-add-a-lock-around-icmp_sk.patch delete mode 100644 kernel/patches-4.19.x-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch delete mode 100644 kernel/patches-4.19.x-rt/0206-irqwork-push-most-work-into-softirq-context.patch delete mode 100644 kernel/patches-4.19.x-rt/0207-printk-Make-rt-aware.patch delete mode 100644 kernel/patches-4.19.x-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch delete mode 100644 kernel/patches-4.19.x-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch delete mode 100644 kernel/patches-4.19.x-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch delete mode 100644 kernel/patches-4.19.x-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch delete mode 100644 kernel/patches-4.19.x-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch delete mode 100644 kernel/patches-4.19.x-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch delete mode 100644 kernel/patches-4.19.x-rt/0214-kgdb-serial-Short-term-workaround.patch delete mode 100644 
kernel/patches-4.19.x-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch delete mode 100644 kernel/patches-4.19.x-rt/0216-mm-rt-kmap_atomic-scheduling.patch delete mode 100644 kernel/patches-4.19.x-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch delete mode 100644 kernel/patches-4.19.x-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch delete mode 100644 kernel/patches-4.19.x-rt/0219-arm-Enable-highmem-for-rt.patch delete mode 100644 kernel/patches-4.19.x-rt/0220-scsi-fcoe-Make-RT-aware.patch delete mode 100644 kernel/patches-4.19.x-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch delete mode 100644 kernel/patches-4.19.x-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch delete mode 100644 kernel/patches-4.19.x-rt/0223-crypto-limit-more-FPU-enabled-sections.patch delete mode 100644 kernel/patches-4.19.x-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch delete mode 100644 kernel/patches-4.19.x-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch delete mode 100644 kernel/patches-4.19.x-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch delete mode 100644 kernel/patches-4.19.x-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch delete mode 100644 kernel/patches-4.19.x-rt/0228-random-Make-it-work-on-rt.patch delete mode 100644 kernel/patches-4.19.x-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch delete mode 100644 kernel/patches-4.19.x-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch delete mode 100644 kernel/patches-4.19.x-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch delete mode 100644 kernel/patches-4.19.x-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch delete mode 100644 kernel/patches-4.19.x-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch delete mode 100644 kernel/patches-4.19.x-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch delete mode 100644 kernel/patches-4.19.x-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch delete mode 100644 kernel/patches-4.19.x-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch delete mode 100644 kernel/patches-4.19.x-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch delete mode 100644 kernel/patches-4.19.x-rt/0239-sched-Add-support-for-lazy-preemption.patch delete mode 100644 kernel/patches-4.19.x-rt/0240-ftrace-Fix-trace-header-alignment.patch delete mode 100644 kernel/patches-4.19.x-rt/0241-x86-Support-for-lazy-preemption.patch delete mode 100644 kernel/patches-4.19.x-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch delete mode 100644 kernel/patches-4.19.x-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch delete mode 100644 kernel/patches-4.19.x-rt/0244-arm-Add-support-for-lazy-preemption.patch delete mode 100644 kernel/patches-4.19.x-rt/0245-powerpc-Add-support-for-lazy-preemption.patch delete mode 100644 kernel/patches-4.19.x-rt/0246-arch-arm64-Add-lazy-preempt-support.patch delete mode 100644 kernel/patches-4.19.x-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch delete mode 100644 kernel/patches-4.19.x-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch delete mode 100644 kernel/patches-4.19.x-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch delete mode 100644 kernel/patches-4.19.x-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch delete mode 100644 
kernel/patches-4.19.x-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch delete mode 100644 kernel/patches-4.19.x-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch delete mode 100644 kernel/patches-4.19.x-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch delete mode 100644 kernel/patches-4.19.x-rt/0255-drm-i915-disable-tracing-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch delete mode 100644 kernel/patches-4.19.x-rt/0257-cgroups-use-simple-wait-in-css_release.patch delete mode 100644 kernel/patches-4.19.x-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch delete mode 100644 kernel/patches-4.19.x-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch delete mode 100644 kernel/patches-4.19.x-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch delete mode 100644 kernel/patches-4.19.x-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch delete mode 100644 kernel/patches-4.19.x-rt/0262-Add-localversion-for-RT-release.patch delete mode 100644 kernel/patches-4.19.x-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch delete mode 100644 kernel/patches-4.19.x-rt/0264-powerpc-reshuffle-TIF-bits.patch delete mode 100644 kernel/patches-4.19.x-rt/0265-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch delete mode 100644 kernel/patches-4.19.x-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch delete mode 100644 kernel/patches-4.19.x-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch delete mode 100644 kernel/patches-4.19.x-rt/0268-kthread-add-a-global-worker-thread.patch delete mode 100644 kernel/patches-4.19.x-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch delete mode 100644 kernel/patches-4.19.x-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch delete mode 100644 kernel/patches-4.19.x-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch delete mode 100644 kernel/patches-4.19.x-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch delete mode 100644 kernel/patches-4.19.x-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch delete mode 100644 kernel/patches-4.19.x-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch delete mode 100644 kernel/patches-4.19.x-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch delete mode 100644 kernel/patches-4.19.x-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch delete mode 100644 kernel/patches-4.19.x-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch delete mode 100644 kernel/patches-4.19.x-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch delete mode 100644 kernel/patches-4.19.x-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch delete mode 100644 kernel/patches-4.19.x-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch delete mode 100644 kernel/patches-4.19.x-rt/0281-futex-Delay-deallocation-of-pi_state.patch delete mode 100644 kernel/patches-4.19.x-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch delete mode 100644 kernel/patches-4.19.x-rt/0283-revert-aio.patch delete mode 100644 kernel/patches-4.19.x-rt/0284-fs-aio-simple-simple-work.patch delete mode 100644 kernel/patches-4.19.x-rt/0285-revert-thermal.patch delete mode 100644 kernel/patches-4.19.x-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch delete mode 
100644 kernel/patches-4.19.x-rt/0287-revert-block.patch delete mode 100644 kernel/patches-4.19.x-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch delete mode 100644 kernel/patches-4.19.x-rt/0289-workqueue-rework.patch delete mode 100644 kernel/patches-4.19.x-rt/0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch delete mode 100644 kernel/patches-4.19.x-rt/0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch delete mode 100644 kernel/patches-4.19.x-rt/0292-sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch delete mode 100644 kernel/patches-4.19.x-rt/0293-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch delete mode 100644 kernel/patches-4.19.x-rt/0294-dma-buf-Use-seqlock_t-instread-disabling-preemption.patch delete mode 100644 kernel/patches-4.19.x-rt/0295-KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch delete mode 100644 kernel/patches-4.19.x-rt/0296-x86-preempt-Check-preemption-level-before-looking-at.patch delete mode 100644 kernel/patches-4.19.x-rt/0297-hrtimer-Use-READ_ONCE-to-access-timer-base-in-hrimer.patch delete mode 100644 kernel/patches-4.19.x-rt/0298-hrtimer-Don-t-grab-the-expiry-lock-for-non-soft-hrti.patch delete mode 100644 kernel/patches-4.19.x-rt/0299-hrtimer-Prevent-using-hrtimer_grab_expiry_lock-on-mi.patch delete mode 100644 kernel/patches-4.19.x-rt/0300-hrtimer-Add-a-missing-bracket-and-hide-migration_bas.patch delete mode 100644 kernel/patches-4.19.x-rt/0301-posix-timers-Unlock-expiry-lock-in-the-early-return.patch delete mode 100644 kernel/patches-4.19.x-rt/0302-sched-migrate_dis-enable-Use-sleeping_lock-to-annota.patch delete mode 100644 kernel/patches-4.19.x-rt/0303-sched-__set_cpus_allowed_ptr-Check-cpus_mask-not-cpu.patch delete mode 100644 kernel/patches-4.19.x-rt/0304-sched-Remove-dead-__migrate_disabled-check.patch delete mode 100644 kernel/patches-4.19.x-rt/0305-sched-migrate-disable-Protect-cpus_ptr-with-lock.patch delete mode 100644 kernel/patches-4.19.x-rt/0306-lib-smp_processor_id-Don-t-use-cpumask_equal.patch delete mode 100644 kernel/patches-4.19.x-rt/0307-futex-Make-the-futex_hash_bucket-spinlock_t-again-an.patch delete mode 100644 kernel/patches-4.19.x-rt/0308-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch delete mode 100644 kernel/patches-4.19.x-rt/0309-lib-ubsan-Don-t-seralize-UBSAN-report.patch delete mode 100644 kernel/patches-4.19.x-rt/0310-kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch delete mode 100644 kernel/patches-4.19.x-rt/0311-sched-migrate_enable-Use-select_fallback_rq.patch delete mode 100644 kernel/patches-4.19.x-rt/0312-sched-Lazy-migrate_disable-processing.patch delete mode 100644 kernel/patches-4.19.x-rt/0313-sched-migrate_enable-Use-stop_one_cpu_nowait.patch delete mode 100644 kernel/patches-4.19.x-rt/0314-Revert-ARM-Initialize-split-page-table-locks-for-vec.patch delete mode 100644 kernel/patches-4.19.x-rt/0315-locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch delete mode 100644 kernel/patches-4.19.x-rt/0316-sched-core-migrate_enable-must-access-takedown_cpu_t.patch delete mode 100644 kernel/patches-4.19.x-rt/0317-lib-smp_processor_id-Adjust-check_preemption_disable.patch delete mode 100644 kernel/patches-4.19.x-rt/0318-sched-migrate_enable-Busy-loop-until-the-migration-r.patch delete mode 100644 kernel/patches-4.19.x-rt/0319-userfaultfd-Use-a-seqlock-instead-of-seqcount.patch delete mode 100644 kernel/patches-4.19.x-rt/0320-sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch delete mode 100644 
kernel/patches-4.19.x-rt/0321-sched-migrate_enable-Remove-__schedule-call.patch delete mode 100644 kernel/patches-4.19.x-rt/0322-mm-memcontrol-Move-misplaced-local_unlock_irqrestore.patch delete mode 100644 kernel/patches-4.19.x-rt/0323-locallock-Include-header-for-the-current-macro.patch delete mode 100644 kernel/patches-4.19.x-rt/0324-drm-vmwgfx-Drop-preempt_disable-in-vmw_fifo_ping_hos.patch delete mode 100644 kernel/patches-4.19.x-rt/0325-tracing-make-preempt_lazy-and-migrate_disable-counte.patch delete mode 100644 kernel/patches-4.19.x-rt/0326-lib-ubsan-Remove-flags-parameter-from-calls-to-ubsan.patch delete mode 100644 kernel/patches-4.19.x-rt/0327-irq_work-Fix-checking-of-IRQ_WORK_LAZY-flag-set-on-n.patch delete mode 100644 kernel/patches-4.19.x-rt/0328-Linux-4.19.106-rt46-REBASE.patch diff --git a/kernel/Makefile b/kernel/Makefile index 2f351c6e6..f442c91eb 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -256,13 +256,11 @@ $(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,5.4.28,5.4.x,-rt,)) $(eval $(call kernel,4.19.113,4.19.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,4.19.113,4.19.x,,-dbg)) -$(eval $(call kernel,4.19.106,4.19.x,-rt,)) $(eval $(call kernel,4.14.174,4.14.x,$(EXTRA),$(DEBUG))) else ifeq ($(ARCH),aarch64) $(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,5.4.28,5.4.x,-rt,)) -$(eval $(call kernel,4.19.106,4.19.x,-rt,)) else ifeq ($(ARCH),s390x) $(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG))) diff --git a/kernel/config-4.19.x-aarch64 b/kernel/config-4.19.x-aarch64 deleted file mode 100644 index bec150ace..000000000 --- a/kernel/config-4.19.x-aarch64 +++ /dev/null @@ -1,4550 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. -# Linux/arm64 4.19.113 Kernel Configuration -# - -# -# Compiler: gcc (Alpine 8.3.0) 8.3.0 -# -CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=80300 -CONFIG_CLANG_VERSION=0 -CONFIG_CC_HAS_ASM_GOTO=y -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_EXTABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -# CONFIG_COMPILE_TEST is not set -CONFIG_LOCALVERSION="-linuxkit" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_BUILD_SALT="" -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_USELIB is not set -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_SHOW_LEVEL=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -CONFIG_HANDLE_DOMAIN_IRQ=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set -CONFIG_GENERIC_IRQ_MULTI_HANDLER=y -CONFIG_ARCH_CLOCKSOURCE_DATA=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_ARCH_HAS_TICK_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -CONFIG_NO_HZ_IDLE=y -# CONFIG_NO_HZ_FULL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -# CONFIG_PREEMPT_NONE is not set -# CONFIG_PREEMPT_VOLUNTARY is not set -CONFIG_PREEMPT=y -CONFIG_PREEMPT_COUNT=y - -# -# CPU/Task time and stats 
accounting -# -CONFIG_TICK_CPU_ACCOUNTING=y -# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set -# CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_CPU_ISOLATION=y - -# -# RCU Subsystem -# -CONFIG_PREEMPT_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_SRCU=y -CONFIG_TREE_SRCU=y -CONFIG_TASKS_RCU=y -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -CONFIG_BUILD_BIN2C=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=17 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 -CONFIG_GENERIC_SCHED_CLOCK=y -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y -CONFIG_MEMCG_KMEM=y -CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_RDMA=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_SOCK_CGROUP_DATA=y -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_SCHED_AUTOGROUP=y -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -# CONFIG_RD_BZIP2 is not set -# CONFIG_RD_LZMA is not set -# CONFIG_RD_XZ is not set -# CONFIG_RD_LZO is not set -# CONFIG_RD_LZ4 is not set -# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_BPF=y -CONFIG_EXPERT=y -CONFIG_MULTIUSER=y -# CONFIG_SGETMASK_SYSCALL is not set -CONFIG_SYSFS_SYSCALL=y -# CONFIG_SYSCTL_SYSCALL is not set -CONFIG_FHANDLE=y -CONFIG_POSIX_TIMERS=y -CONFIG_PRINTK=y -CONFIG_PRINTK_NMI=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_MEMBARRIER=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_ALL is not set -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT_ALWAYS_ON=y -# CONFIG_USERFAULTFD is not set -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_RSEQ=y -# CONFIG_DEBUG_RSEQ is not set -# CONFIG_EMBEDDED is not set -CONFIG_HAVE_PERF_EVENTS=y -# CONFIG_PC104 is not set - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y -# CONFIG_SLUB is not set -# CONFIG_SLOB is not set -CONFIG_SLAB_MERGE_DEFAULT=y -CONFIG_SLAB_FREELIST_RANDOM=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y -CONFIG_ARM64=y -CONFIG_64BIT=y -CONFIG_MMU=y -CONFIG_ARM64_PAGE_SHIFT=12 -CONFIG_ARM64_CONT_SHIFT=4 -CONFIG_ARCH_MMAP_RND_BITS_MIN=18 -CONFIG_ARCH_MMAP_RND_BITS_MAX=33 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_BUG=y 
-CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_CSUM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ZONE_DMA32=y -CONFIG_HAVE_GENERIC_GUP=y -CONFIG_SMP=y -CONFIG_KERNEL_MODE_NEON=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_PGTABLE_LEVELS=4 -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_ARCH_PROC_KCORE_TEXT=y - -# -# Platform selection -# -# CONFIG_ARCH_ACTIONS is not set -# CONFIG_ARCH_SUNXI is not set -# CONFIG_ARCH_ALPINE is not set -CONFIG_ARCH_BCM2835=y -# CONFIG_ARCH_BCM_IPROC is not set -# CONFIG_ARCH_BERLIN is not set -# CONFIG_ARCH_BRCMSTB is not set -CONFIG_ARCH_EXYNOS=y -# CONFIG_ARCH_K3 is not set -CONFIG_ARCH_LAYERSCAPE=y -# CONFIG_ARCH_LG1K is not set -CONFIG_ARCH_HISI=y -# CONFIG_ARCH_MEDIATEK is not set -# CONFIG_ARCH_MESON is not set -# CONFIG_ARCH_MVEBU is not set -CONFIG_ARCH_QCOM=y -# CONFIG_ARCH_REALTEK is not set -# CONFIG_ARCH_ROCKCHIP is not set -CONFIG_ARCH_SEATTLE=y -CONFIG_ARCH_SYNQUACER=y -# CONFIG_ARCH_RENESAS is not set -# CONFIG_ARCH_STRATIX10 is not set -# CONFIG_ARCH_TEGRA is not set -# CONFIG_ARCH_SPRD is not set -CONFIG_ARCH_THUNDER=y -CONFIG_ARCH_THUNDER2=y -# CONFIG_ARCH_UNIPHIER is not set -CONFIG_ARCH_VEXPRESS=y -# CONFIG_ARCH_XGENE is not set -# CONFIG_ARCH_ZX is not set -# CONFIG_ARCH_ZYNQMP is not set - -# -# Bus support -# -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -CONFIG_PCI_SYSCALL=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -# CONFIG_PCIEAER is not set -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -# CONFIG_PCIE_PTM is not set -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -CONFIG_PCI_REALLOC_ENABLE_AUTO=y -CONFIG_PCI_STUB=y -# CONFIG_PCI_PF_STUB is not set -CONFIG_PCI_ATS=y -CONFIG_PCI_ECAM=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PRI=y -CONFIG_PCI_PASID=y -CONFIG_PCI_LABEL=y -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set -# CONFIG_HOTPLUG_PCI_CPCI is not set -# CONFIG_HOTPLUG_PCI_SHPC is not set - -# -# PCI controller drivers -# - -# -# Cadence PCIe controllers support -# -# CONFIG_PCIE_CADENCE_HOST is not set -# CONFIG_PCI_FTPCI100 is not set -CONFIG_PCI_HOST_COMMON=y -CONFIG_PCI_HOST_GENERIC=y -# CONFIG_PCIE_XILINX is not set -# CONFIG_PCI_XGENE is not set -CONFIG_PCI_HOST_THUNDER_PEM=y -CONFIG_PCI_HOST_THUNDER_ECAM=y - -# -# DesignWare PCI Core Support -# -CONFIG_PCIE_DW=y -CONFIG_PCIE_DW_HOST=y -# CONFIG_PCIE_DW_PLAT_HOST is not set -# CONFIG_PCI_LAYERSCAPE is not set -CONFIG_PCI_HISI=y -CONFIG_PCIE_QCOM=y -# CONFIG_PCIE_KIRIN is not set -CONFIG_PCIE_HISI_STB=y - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set - -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set - -# -# Kernel Features -# - -# -# ARM errata workarounds via the alternatives framework -# -CONFIG_ARM64_ERRATUM_826319=y -CONFIG_ARM64_ERRATUM_827319=y -CONFIG_ARM64_ERRATUM_824069=y -CONFIG_ARM64_ERRATUM_819472=y -CONFIG_ARM64_ERRATUM_832075=y -CONFIG_ARM64_ERRATUM_834220=y -CONFIG_ARM64_ERRATUM_843419=y -CONFIG_ARM64_ERRATUM_1024718=y -CONFIG_ARM64_ERRATUM_1463225=y -CONFIG_CAVIUM_ERRATUM_22375=y -CONFIG_CAVIUM_ERRATUM_23154=y -CONFIG_CAVIUM_ERRATUM_27456=y -CONFIG_CAVIUM_ERRATUM_30115=y -CONFIG_QCOM_FALKOR_ERRATUM_1003=y -CONFIG_QCOM_FALKOR_ERRATUM_1009=y -CONFIG_QCOM_QDF2400_ERRATUM_0065=y -CONFIG_SOCIONEXT_SYNQUACER_PREITS=y 
-CONFIG_HISILICON_ERRATUM_161600802=y -CONFIG_QCOM_FALKOR_ERRATUM_E1041=y -CONFIG_ARM64_4K_PAGES=y -# CONFIG_ARM64_16K_PAGES is not set -# CONFIG_ARM64_64K_PAGES is not set -# CONFIG_ARM64_VA_BITS_39 is not set -CONFIG_ARM64_VA_BITS_48=y -CONFIG_ARM64_VA_BITS=48 -CONFIG_ARM64_PA_BITS_48=y -CONFIG_ARM64_PA_BITS=48 -# CONFIG_CPU_BIG_ENDIAN is not set -CONFIG_SCHED_MC=y -# CONFIG_SCHED_SMT is not set -CONFIG_NR_CPUS=64 -CONFIG_HOTPLUG_CPU=y -# CONFIG_NUMA is not set -CONFIG_HOLES_IN_ZONE=y -# CONFIG_HZ_100 is not set -# CONFIG_HZ_250 is not set -# CONFIG_HZ_300 is not set -CONFIG_HZ_1000=y -CONFIG_HZ=1000 -CONFIG_SCHED_HRTICK=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ARCH_FLATMEM_ENABLE=y -CONFIG_HAVE_ARCH_PFN_VALID=y -CONFIG_HW_PERF_EVENTS=y -CONFIG_SYS_SUPPORTS_HUGETLBFS=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_SECCOMP=y -CONFIG_PARAVIRT=y -# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set -# CONFIG_CRASH_DUMP is not set -CONFIG_XEN_DOM0=y -CONFIG_XEN=y -CONFIG_FORCE_MAX_ZONEORDER=11 -CONFIG_UNMAP_KERNEL_AT_EL0=y -CONFIG_HARDEN_BRANCH_PREDICTOR=y -CONFIG_HARDEN_EL2_VECTORS=y -CONFIG_ARM64_SSBD=y -# CONFIG_ARM64_SW_TTBR0_PAN is not set - -# -# ARMv8.1 architectural features -# -CONFIG_ARM64_HW_AFDBM=y -CONFIG_ARM64_PAN=y -# CONFIG_ARM64_LSE_ATOMICS is not set -CONFIG_ARM64_VHE=y - -# -# ARMv8.2 architectural features -# -CONFIG_ARM64_UAO=y -# CONFIG_ARM64_PMEM is not set -CONFIG_ARM64_RAS_EXTN=y -CONFIG_ARM64_SVE=y -CONFIG_ARM64_MODULE_PLTS=y -CONFIG_RELOCATABLE=y -CONFIG_RANDOMIZE_BASE=y -CONFIG_RANDOMIZE_MODULE_REGION_FULL=y - -# -# Boot options -# -# CONFIG_ARM64_ACPI_PARKING_PROTOCOL is not set -CONFIG_CMDLINE="" -# CONFIG_CMDLINE_FORCE is not set -CONFIG_EFI_STUB=y -CONFIG_EFI=y -CONFIG_DMI=y -# CONFIG_COMPAT is not set - -# -# Power management options -# -# CONFIG_SUSPEND is not set -# CONFIG_HIBERNATION is not set -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_CLK=y -CONFIG_PM_GENERIC_DOMAINS=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_PM_GENERIC_DOMAINS_OF=y -CONFIG_CPU_PM=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y - -# -# CPU Power Management -# - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y -# CONFIG_CPU_IDLE_GOV_LADDER is not set -CONFIG_CPU_IDLE_GOV_MENU=y -CONFIG_DT_IDLE_STATES=y - -# -# ARM CPU Idle Drivers -# -CONFIG_ARM_CPUIDLE=y - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -# CONFIG_CPU_FREQ_STAT is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set - -# -# CPU frequency scaling drivers -# -CONFIG_CPUFREQ_DT=y -CONFIG_CPUFREQ_DT_PLATDEV=y -# CONFIG_ACPI_CPPC_CPUFREQ is not set -CONFIG_ARM_BIG_LITTLE_CPUFREQ=y -CONFIG_ARM_DT_BL_CPUFREQ=y -# CONFIG_QORIQ_CPUFREQ is not set - -# -# Firmware Drivers -# -CONFIG_ARM_PSCI_FW=y -# CONFIG_ARM_PSCI_CHECKER is not set -# CONFIG_ARM_SCMI_PROTOCOL is not set -# 
CONFIG_ARM_SCPI_PROTOCOL is not set -# CONFIG_ARM_SDE_INTERFACE is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -# CONFIG_ISCSI_IBFT is not set -CONFIG_RASPBERRYPI_FIRMWARE=y -# CONFIG_FW_CFG_SYSFS is not set -CONFIG_HAVE_ARM_SMCCC=y -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_VARS=y -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=y -# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set -CONFIG_EFI_PARAMS_FROM_FDT=y -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_ARMSTUB=y -CONFIG_EFI_ARMSTUB_DTB_LOADER=y -# CONFIG_EFI_BOOTLOADER_CONTROL is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -CONFIG_RESET_ATTACK_MITIGATION=y - -# -# Tegra firmware driver -# -CONFIG_ARCH_SUPPORTS_ACPI=y -CONFIG_ACPI=y -CONFIG_ACPI_GENERIC_GSI=y -CONFIG_ACPI_CCA_REQUIRED=y -CONFIG_ACPI_DEBUGGER=y -# CONFIG_ACPI_DEBUGGER_USER is not set -CONFIG_ACPI_SPCR_TABLE=y -CONFIG_ACPI_EC_DEBUGFS=y -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_FAN=y -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_PROCESSOR_IDLE=y -CONFIG_ACPI_MCFG=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_THERMAL=y -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_DEBUG=y -CONFIG_ACPI_PCI_SLOT=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_HED=y -CONFIG_ACPI_CUSTOM_METHOD=y -# CONFIG_ACPI_BGRT is not set -CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y -CONFIG_HAVE_ACPI_APEI=y -# CONFIG_ACPI_APEI is not set -CONFIG_PMIC_OPREGION=y -CONFIG_ACPI_CONFIGFS=y -CONFIG_ACPI_IORT=y -CONFIG_ACPI_GTDT=y -CONFIG_ACPI_PPTT=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y -CONFIG_IRQ_BYPASS_MANAGER=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=y -CONFIG_KVM_ARM_HOST=y -CONFIG_KVM_ARM_PMU=y -CONFIG_KVM_INDIRECT_VECTORS=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_VSOCK=m -CONFIG_VHOST=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA256_ARM64=y -CONFIG_CRYPTO_SHA512_ARM64=y -CONFIG_CRYPTO_SHA1_ARM64_CE=y -CONFIG_CRYPTO_SHA2_ARM64_CE=y -# CONFIG_CRYPTO_SHA512_ARM64_CE is not set -# CONFIG_CRYPTO_SHA3_ARM64 is not set -# CONFIG_CRYPTO_SM3_ARM64_CE is not set -# CONFIG_CRYPTO_SM4_ARM64_CE is not set -CONFIG_CRYPTO_GHASH_ARM64_CE=y -CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=y -CONFIG_CRYPTO_CRC32_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64=y -CONFIG_CRYPTO_AES_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y -CONFIG_CRYPTO_CHACHA20_NEON=y -CONFIG_CRYPTO_AES_ARM64_BS=y - -# -# General architecture-dependent options -# -CONFIG_CRASH_CORE=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -CONFIG_UPROBES=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_KRETPROBES=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_NMI=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_GENERIC_IDLE_POLL_SETUP=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y 
-CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_CC_HAS_STACKPROTECTOR_NONE=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_ARCH_MMAP_RND_BITS=18 -CONFIG_CLONE_BACKWARDS=y -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_REFCOUNT_FULL=y -CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -CONFIG_PLUGIN_HOSTCC="g++" -CONFIG_HAVE_GCC_PLUGINS=y -CONFIG_GCC_PLUGINS=y -# CONFIG_GCC_PLUGIN_CYC_COMPLEXITY is not set -# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set -CONFIG_GCC_PLUGIN_STRUCTLEAK=y -CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y -# CONFIG_GCC_PLUGIN_STRUCTLEAK_VERBOSE is not set -CONFIG_GCC_PLUGIN_RANDSTRUCT=y -CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -CONFIG_MODVERSIONS=y -# CONFIG_MODULE_SRCVERSION_ALL is not set -# CONFIG_MODULE_SIG is not set -# CONFIG_MODULE_COMPRESS is not set -# CONFIG_TRIM_UNUSED_KSYMS is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLK_SCSI_REQUEST=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -# CONFIG_BLK_DEV_ZONED is not set -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -# CONFIG_BLK_CMDLINE_PARSER is not set -# CONFIG_BLK_WBT is not set -# CONFIG_BLK_CGROUP_IOLATENCY is not set -CONFIG_BLK_DEBUG_FS=y -# CONFIG_BLK_SED_OPAL is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_AIX_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set -# CONFIG_LDM_PARTITION is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -CONFIG_DEFAULT_DEADLINE=y -# CONFIG_DEFAULT_CFQ is not set -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="deadline" -CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=y -# CONFIG_IOSCHED_BFQ is not set -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_ASN1=y -CONFIG_UNINLINE_SPIN_UNLOCK=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y 
-CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y -CONFIG_FREEZER=y - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -CONFIG_ELFCORE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_SCRIPT=y -CONFIG_BINFMT_MISC=y -CONFIG_COREDUMP=y - -# -# Memory Management options -# -CONFIG_SELECT_MEMORY_MODEL=y -# CONFIG_FLATMEM_MANUAL is not set -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_NO_BOOTMEM=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -# CONFIG_MEMORY_FAILURE is not set -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_TRANSPARENT_HUGE_PAGECACHE=y -# CONFIG_CLEANCACHE is not set -# CONFIG_FRONTSWAP is not set -# CONFIG_CMA is not set -# CONFIG_ZPOOL is not set -# CONFIG_ZBUD is not set -# CONFIG_ZSMALLOC is not set -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set -# CONFIG_IDLE_PAGE_TRACKING is not set -# CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_BENCHMARK is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y -CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=y -CONFIG_UNIX=y -CONFIG_UNIX_DIAG=y -# CONFIG_TLS is not set -CONFIG_XFRM=y -CONFIG_XFRM_ALGO=m -CONFIG_XFRM_USER=m -# CONFIG_XFRM_INTERFACE is not set -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_XDP_SOCKETS=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -# CONFIG_IP_PNP_BOOTP is not set -# CONFIG_IP_PNP_RARP is not set -CONFIG_NET_IPIP=y -CONFIG_NET_IPGRE_DEMUX=y -CONFIG_NET_IP_TUNNEL=y -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE_COMMON=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=y -CONFIG_NET_FOU=y -CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -# CONFIG_INET_ESP_OFFLOAD is not set -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=y -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y -CONFIG_INET_UDP_DIAG=y -# CONFIG_INET_RAW_DIAG is not set -# CONFIG_INET_DIAG_DESTROY is not set -# CONFIG_TCP_CONG_ADVANCED is not set -CONFIG_TCP_CONG_CUBIC=y -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -# CONFIG_IPV6_ROUTE_INFO is not set -# CONFIG_IPV6_OPTIMISTIC_DAD is not set -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -# CONFIG_INET6_ESP_OFFLOAD is not set -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -CONFIG_IPV6_ILA=m -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=y -CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m 
-CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=y -CONFIG_IPV6_GRE=m -CONFIG_IPV6_FOU=y -CONFIG_IPV6_FOU_TUNNEL=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -# CONFIG_IPV6_MROUTE is not set -# CONFIG_IPV6_SEG6_LWTUNNEL is not set -# CONFIG_IPV6_SEG6_HMAC is not set -CONFIG_NETLABEL=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_NETLINK=y -CONFIG_NETFILTER_FAMILY_BRIDGE=y -CONFIG_NETFILTER_FAMILY_ARP=y -CONFIG_NETFILTER_NETLINK_ACCT=y -CONFIG_NETFILTER_NETLINK_QUEUE=y -CONFIG_NETFILTER_NETLINK_LOG=y -CONFIG_NETFILTER_NETLINK_OSF=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_LOG_COMMON=y -# CONFIG_NF_LOG_NETDEV is not set -CONFIG_NETFILTER_CONNCOUNT=y -CONFIG_NF_CONNTRACK_MARK=y -# CONFIG_NF_CONNTRACK_SECMARK is not set -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=y -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_BROADCAST=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_SNMP=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_SIP=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NF_CT_NETLINK_TIMEOUT=y -CONFIG_NF_CT_NETLINK_HELPER=y -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=y -CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y -CONFIG_NF_NAT_AMANDA=y -CONFIG_NF_NAT_FTP=y -CONFIG_NF_NAT_IRC=y -CONFIG_NF_NAT_SIP=y -CONFIG_NF_NAT_TFTP=y -CONFIG_NF_NAT_REDIRECT=y -CONFIG_NETFILTER_SYNPROXY=y -CONFIG_NF_TABLES=y -# CONFIG_NF_TABLES_SET is not set -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_NETDEV=y -# CONFIG_NFT_NUMGEN is not set -CONFIG_NFT_CT=y -CONFIG_NFT_COUNTER=y -CONFIG_NFT_CONNLIMIT=y -CONFIG_NFT_LOG=y -CONFIG_NFT_LIMIT=y -CONFIG_NFT_MASQ=y -CONFIG_NFT_REDIR=y -CONFIG_NFT_NAT=y -CONFIG_NFT_TUNNEL=y -# CONFIG_NFT_OBJREF is not set -CONFIG_NFT_QUEUE=y -# CONFIG_NFT_QUOTA is not set -CONFIG_NFT_REJECT=y -CONFIG_NFT_REJECT_INET=y -CONFIG_NFT_COMPAT=y -CONFIG_NFT_HASH=y -# CONFIG_NFT_SOCKET is not set -CONFIG_NFT_OSF=y -CONFIG_NFT_TPROXY=y -CONFIG_NF_DUP_NETDEV=y -CONFIG_NFT_DUP_NETDEV=y -CONFIG_NFT_FWD_NETDEV=y -# CONFIG_NF_FLOW_TABLE is not set -CONFIG_NETFILTER_XTABLES=y - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=y -CONFIG_NETFILTER_XT_CONNMARK=y -CONFIG_NETFILTER_XT_SET=y - -# -# Xtables targets -# -# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CT=y -CONFIG_NETFILTER_XT_TARGET_DSCP=y -CONFIG_NETFILTER_XT_TARGET_HL=y -CONFIG_NETFILTER_XT_TARGET_HMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_NAT=y -CONFIG_NETFILTER_XT_TARGET_NETMAP=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y 
-CONFIG_NETFILTER_XT_TARGET_RATEEST=y -CONFIG_NETFILTER_XT_TARGET_REDIRECT=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y -CONFIG_NETFILTER_XT_MATCH_BPF=y -CONFIG_NETFILTER_XT_MATCH_CGROUP=y -CONFIG_NETFILTER_XT_MATCH_CLUSTER=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_CPU=y -CONFIG_NETFILTER_XT_MATCH_DCCP=y -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ECN=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_HL=y -CONFIG_NETFILTER_XT_MATCH_IPCOMP=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -CONFIG_NETFILTER_XT_MATCH_IPVS=y -CONFIG_NETFILTER_XT_MATCH_L2TP=y -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_NFACCT=y -CONFIG_NETFILTER_XT_MATCH_OSF=y -CONFIG_NETFILTER_XT_MATCH_OWNER=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_RATEEST=y -CONFIG_NETFILTER_XT_MATCH_REALM=y -CONFIG_NETFILTER_XT_MATCH_RECENT=y -CONFIG_NETFILTER_XT_MATCH_SCTP=y -CONFIG_NETFILTER_XT_MATCH_SOCKET=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_IP_SET=y -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=y -CONFIG_IP_SET_BITMAP_IPMAC=y -CONFIG_IP_SET_BITMAP_PORT=y -CONFIG_IP_SET_HASH_IP=y -# CONFIG_IP_SET_HASH_IPMARK is not set -CONFIG_IP_SET_HASH_IPPORT=y -CONFIG_IP_SET_HASH_IPPORTIP=y -CONFIG_IP_SET_HASH_IPPORTNET=y -# CONFIG_IP_SET_HASH_IPMAC is not set -# CONFIG_IP_SET_HASH_MAC is not set -# CONFIG_IP_SET_HASH_NETPORTNET is not set -CONFIG_IP_SET_HASH_NET=y -# CONFIG_IP_SET_HASH_NETNET is not set -CONFIG_IP_SET_HASH_NETPORT=y -CONFIG_IP_SET_HASH_NETIFACE=y -CONFIG_IP_SET_LIST_SET=y -CONFIG_IP_VS=y -CONFIG_IP_VS_IPV6=y -CONFIG_IP_VS_DEBUG=y -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=y -CONFIG_IP_VS_WRR=y -CONFIG_IP_VS_LC=y -CONFIG_IP_VS_WLC=y -CONFIG_IP_VS_FO=y -CONFIG_IP_VS_OVF=y -CONFIG_IP_VS_LBLC=y -CONFIG_IP_VS_LBLCR=y -CONFIG_IP_VS_DH=y -CONFIG_IP_VS_SH=y -CONFIG_IP_VS_MH=y -CONFIG_IP_VS_SED=y -CONFIG_IP_VS_NQ=y - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS MH scheduler -# -CONFIG_IP_VS_MH_TAB_INDEX=12 - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=y -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=y - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=y -CONFIG_NF_SOCKET_IPV4=y -CONFIG_NF_TPROXY_IPV4=y -CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_CHAIN_ROUTE_IPV4=y -CONFIG_NFT_REJECT_IPV4=y 
-CONFIG_NFT_DUP_IPV4=y -# CONFIG_NFT_FIB_IPV4 is not set -CONFIG_NF_TABLES_ARP=y -CONFIG_NF_DUP_IPV4=y -CONFIG_NF_LOG_ARP=y -CONFIG_NF_LOG_IPV4=y -CONFIG_NF_REJECT_IPV4=y -CONFIG_NF_NAT_IPV4=y -CONFIG_NF_NAT_MASQUERADE_IPV4=y -CONFIG_NFT_CHAIN_NAT_IPV4=y -CONFIG_NFT_MASQ_IPV4=y -CONFIG_NFT_REDIR_IPV4=y -CONFIG_NF_NAT_SNMP_BASIC=y -CONFIG_NF_NAT_PROTO_GRE=y -CONFIG_NF_NAT_PPTP=y -CONFIG_NF_NAT_H323=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_TARGET_SYNPROXY=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_TARGET_CLUSTERIP=y -CONFIG_IP_NF_TARGET_ECN=y -CONFIG_IP_NF_TARGET_TTL=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_SOCKET_IPV6=y -CONFIG_NF_TPROXY_IPV6=y -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_CHAIN_ROUTE_IPV6=y -CONFIG_NFT_CHAIN_NAT_IPV6=y -CONFIG_NFT_MASQ_IPV6=y -CONFIG_NFT_REDIR_IPV6=y -CONFIG_NFT_REJECT_IPV6=y -CONFIG_NFT_DUP_IPV6=y -# CONFIG_NFT_FIB_IPV6 is not set -CONFIG_NF_DUP_IPV6=y -CONFIG_NF_REJECT_IPV6=y -CONFIG_NF_LOG_IPV6=y -CONFIG_NF_NAT_IPV6=y -CONFIG_NF_NAT_MASQUERADE_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_AH=y -CONFIG_IP6_NF_MATCH_EUI64=y -CONFIG_IP6_NF_MATCH_FRAG=y -CONFIG_IP6_NF_MATCH_OPTS=y -CONFIG_IP6_NF_MATCH_HL=y -CONFIG_IP6_NF_MATCH_IPV6HEADER=y -CONFIG_IP6_NF_MATCH_MH=y -CONFIG_IP6_NF_MATCH_RPFILTER=y -CONFIG_IP6_NF_MATCH_RT=y -# CONFIG_IP6_NF_MATCH_SRH is not set -CONFIG_IP6_NF_TARGET_HL=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_TARGET_SYNPROXY=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_IP6_NF_SECURITY=y -CONFIG_IP6_NF_NAT=y -CONFIG_IP6_NF_TARGET_MASQUERADE=y -CONFIG_IP6_NF_TARGET_NPT=y -CONFIG_NF_DEFRAG_IPV6=y -CONFIG_NF_TABLES_BRIDGE=y -CONFIG_NFT_BRIDGE_REJECT=y -CONFIG_NF_LOG_BRIDGE=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_BRIDGE_EBT_T_FILTER=y -CONFIG_BRIDGE_EBT_T_NAT=y -CONFIG_BRIDGE_EBT_802_3=y -CONFIG_BRIDGE_EBT_AMONG=y -CONFIG_BRIDGE_EBT_ARP=y -CONFIG_BRIDGE_EBT_IP=y -CONFIG_BRIDGE_EBT_IP6=y -CONFIG_BRIDGE_EBT_LIMIT=y -CONFIG_BRIDGE_EBT_MARK=y -CONFIG_BRIDGE_EBT_PKTTYPE=y -CONFIG_BRIDGE_EBT_STP=y -CONFIG_BRIDGE_EBT_VLAN=y -CONFIG_BRIDGE_EBT_ARPREPLY=y -CONFIG_BRIDGE_EBT_DNAT=y -CONFIG_BRIDGE_EBT_MARK_T=y -CONFIG_BRIDGE_EBT_REDIRECT=y -CONFIG_BRIDGE_EBT_SNAT=y -CONFIG_BRIDGE_EBT_LOG=y -CONFIG_BRIDGE_EBT_NFLOG=y -CONFIG_BPFILTER=y -CONFIG_BPFILTER_UMH=m -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=m -# CONFIG_SCTP_DBG_OBJCNT is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set -CONFIG_INET_SCTP_DIAG=m -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -CONFIG_L2TP=m -# CONFIG_L2TP_DEBUGFS is not set -# CONFIG_L2TP_V3 is not set -CONFIG_STP=y -CONFIG_BRIDGE=y -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -CONFIG_HAVE_NET_DSA=y -# CONFIG_NET_DSA is not set -CONFIG_VLAN_8021Q=y -# CONFIG_VLAN_8021Q_GVRP is not set -# CONFIG_VLAN_8021Q_MVRP is not set -# CONFIG_DECNET is not set -CONFIG_LLC=y -# CONFIG_LLC2 is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# 
CONFIG_LAPB is not set -# CONFIG_PHONET is not set -# CONFIG_6LOWPAN is not set -# CONFIG_IEEE802154 is not set -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -# CONFIG_NET_SCH_CBS is not set -# CONFIG_NET_SCH_ETF is not set -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -# CONFIG_NET_SCH_SKBPRIO is not set -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -# CONFIG_NET_SCH_CODEL is not set -# CONFIG_NET_SCH_FQ_CODEL is not set -# CONFIG_NET_SCH_CAKE is not set -# CONFIG_NET_SCH_FQ is not set -# CONFIG_NET_SCH_HHF is not set -# CONFIG_NET_SCH_PIE is not set -CONFIG_NET_SCH_INGRESS=m -# CONFIG_NET_SCH_PLUG is not set -# CONFIG_NET_SCH_DEFAULT is not set - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=y -CONFIG_NET_CLS_TCINDEX=y -CONFIG_NET_CLS_ROUTE4=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=y -CONFIG_NET_CLS_RSVP6=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=y -# CONFIG_NET_CLS_FLOWER is not set -CONFIG_NET_CLS_MATCHALL=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_EMATCH_IPSET=y -# CONFIG_NET_EMATCH_IPT is not set -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=y -CONFIG_NET_ACT_GACT=y -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=y -# CONFIG_NET_ACT_SAMPLE is not set -CONFIG_NET_ACT_IPT=y -CONFIG_NET_ACT_NAT=y -CONFIG_NET_ACT_PEDIT=y -CONFIG_NET_ACT_SIMP=y -CONFIG_NET_ACT_SKBEDIT=y -CONFIG_NET_ACT_CSUM=y -# CONFIG_NET_ACT_VLAN is not set -CONFIG_NET_ACT_BPF=y -# CONFIG_NET_ACT_CONNMARK is not set -# CONFIG_NET_ACT_SKBMOD is not set -# CONFIG_NET_ACT_IFE is not set -# CONFIG_NET_ACT_TUNNEL_KEY is not set -CONFIG_NET_CLS_IND=y -CONFIG_NET_SCH_FIFO=y -# CONFIG_DCB is not set -CONFIG_DNS_RESOLVER=y -# CONFIG_BATMAN_ADV is not set -CONFIG_OPENVSWITCH=m -CONFIG_OPENVSWITCH_GRE=m -CONFIG_OPENVSWITCH_VXLAN=m -CONFIG_OPENVSWITCH_GENEVE=m -CONFIG_VSOCKETS=m -CONFIG_VSOCKETS_DIAG=m -CONFIG_VIRTIO_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS_COMMON=m -CONFIG_NETLINK_DIAG=y -CONFIG_MPLS=y -CONFIG_NET_MPLS_GSO=m -CONFIG_MPLS_ROUTING=m -CONFIG_MPLS_IPTUNNEL=m -CONFIG_NET_NSH=m -# CONFIG_HSR is not set -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_L3_MASTER_DEV=y -# CONFIG_QRTR is not set -# CONFIG_NET_NCSI is not set -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -CONFIG_BPF_JIT=y -# CONFIG_BPF_STREAM_PARSER is not set -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_NET_DROP_MONITOR is not set -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set -# CONFIG_AF_KCM is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set - -# -# CFG80211 needs to be enabled for MAC80211 -# -CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 -# CONFIG_WIMAX is not set -# CONFIG_RFKILL is not set -CONFIG_NET_9P=y -CONFIG_NET_9P_VIRTIO=y -# CONFIG_NET_9P_XEN is not set -# CONFIG_NET_9P_DEBUG is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=m -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -# 
CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set -# CONFIG_NFC is not set -# CONFIG_PSAMPLE is not set -# CONFIG_NET_IFE is not set -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -# CONFIG_NET_DEVLINK is not set -CONFIG_MAY_USE_DEVLINK=y -CONFIG_PAGE_POOL=y -CONFIG_FAILOVER=y -CONFIG_HAVE_EBPF_JIT=y - -# -# Device Drivers -# -CONFIG_ARM_AMBA=y - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER=y -CONFIG_UEVENT_HELPER_PATH="" -CONFIG_DEVTMPFS=y -# CONFIG_DEVTMPFS_MOUNT is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y - -# -# Firmware loader -# -CONFIG_FW_LOADER=y -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_FW_LOADER_USER_HELPER is not set -CONFIG_ALLOW_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set -CONFIG_SYS_HYPERVISOR=y -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_GENERIC_CPU_VULNERABILITIES=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=y -CONFIG_REGMAP_MMIO=y -CONFIG_GENERIC_ARCH_TOPOLOGY=y - -# -# Bus devices -# -CONFIG_ARM_CCI=y -CONFIG_ARM_CCI400_COMMON=y -# CONFIG_BRCMSTB_GISB_ARB is not set -# CONFIG_HISILICON_LPC is not set -CONFIG_QCOM_EBI2=y -# CONFIG_SIMPLE_PM_BUS is not set -CONFIG_VEXPRESS_CONFIG=y -# CONFIG_FSL_MC_BUS is not set -# CONFIG_CONNECTOR is not set -# CONFIG_GNSS is not set -# CONFIG_MTD is not set -CONFIG_DTC=y -CONFIG_OF=y -# CONFIG_OF_UNITTEST is not set -CONFIG_OF_FLATTREE=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_KOBJ=y -CONFIG_OF_DYNAMIC=y -CONFIG_OF_ADDRESS=y -CONFIG_OF_IRQ=y -CONFIG_OF_NET=y -CONFIG_OF_MDIO=m -CONFIG_OF_RESERVED_MEM=y -CONFIG_OF_RESOLVE=y -CONFIG_OF_OVERLAY=y -# CONFIG_PARPORT is not set -CONFIG_PNP=y -CONFIG_PNP_DEBUG_MESSAGES=y - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_NULL_BLK is not set -CONFIG_CDROM=y -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -# CONFIG_BLK_DEV_DAC960 is not set -# CONFIG_BLK_DEV_UMEM is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -CONFIG_BLK_DEV_CRYPTOLOOP=y -CONFIG_BLK_DEV_DRBD=m -# CONFIG_DRBD_FAULT_INJECTION is not set -CONFIG_BLK_DEV_NBD=y -# CONFIG_BLK_DEV_SKD is not set -# CONFIG_BLK_DEV_SX8 is not set -CONFIG_BLK_DEV_RAM=m -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=4096 -# CONFIG_CDROM_PKTCDVD is not set -CONFIG_ATA_OVER_ETH=m -CONFIG_XEN_BLKDEV_FRONTEND=y -CONFIG_VIRTIO_BLK=y -# CONFIG_VIRTIO_BLK_SCSI is not set -CONFIG_BLK_DEV_RBD=m -# CONFIG_BLK_DEV_RSXX is not set - -# -# NVME Support -# -CONFIG_NVME_CORE=y -CONFIG_BLK_DEV_NVME=y -# CONFIG_NVME_MULTIPATH is not set -# CONFIG_NVME_FC is not set -# CONFIG_NVME_TARGET is not set - -# -# Misc devices -# -# CONFIG_AD525X_DPOT is not set -# CONFIG_DUMMY_IRQ is not set -# CONFIG_PHANTOM is not set -# CONFIG_SGI_IOC4 is not set -# CONFIG_TIFM_CORE is not set -# CONFIG_ICS932S401 is not set -# CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HP_ILO is not set -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -# CONFIG_USB_SWITCH_FSA9480 is not set -# CONFIG_SRAM is not set -CONFIG_VEXPRESS_SYSCFG=y -# CONFIG_PCI_ENDPOINT_TEST is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_EEPROM_MAX6875 is not set -# CONFIG_EEPROM_93CX6 is not set -# 
CONFIG_EEPROM_IDT_89HPESX is not set -# CONFIG_CB710_CORE is not set - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_TI_ST is not set -# CONFIG_SENSORS_LIS3_I2C is not set -# CONFIG_ALTERA_STAPL is not set - -# -# Intel MIC & related support -# - -# -# Intel MIC Bus Driver -# - -# -# SCIF Bus Driver -# - -# -# VOP Bus Driver -# - -# -# Intel MIC Host Driver -# - -# -# Intel MIC Card Driver -# - -# -# SCIF Driver -# - -# -# Intel MIC Coprocessor State Management (COSM) Drivers -# - -# -# VOP Driver -# -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_MISC_RTSX_PCI is not set -# CONFIG_MISC_RTSX_USB is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -# CONFIG_SCSI_MQ_DEFAULT is not set -# CONFIG_SCSI_PROC_FS is not set - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=y -# CONFIG_CHR_DEV_ST is not set -# CONFIG_CHR_DEV_OSST is not set -CONFIG_BLK_DEV_SR=y -# CONFIG_BLK_DEV_SR_VENDOR is not set -CONFIG_CHR_DEV_SG=y -# CONFIG_CHR_DEV_SCH is not set -# CONFIG_SCSI_CONSTANTS is not set -# CONFIG_SCSI_LOGGING is not set -# CONFIG_SCSI_SCAN_ASYNC is not set - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=y -# CONFIG_SCSI_FC_ATTRS is not set -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=y -CONFIG_SCSI_SAS_LIBSAS=y -# CONFIG_SCSI_SAS_ATA is not set -CONFIG_SCSI_SAS_HOST_SMP=y -# CONFIG_SCSI_SRP_ATTRS is not set -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -# CONFIG_ISCSI_BOOT_SYSFS is not set -# CONFIG_SCSI_CXGB3_ISCSI is not set -# CONFIG_SCSI_CXGB4_ISCSI is not set -# CONFIG_SCSI_BNX2_ISCSI is not set -# CONFIG_BE2ISCSI is not set -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -CONFIG_SCSI_HPSA=m -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -# CONFIG_SCSI_AACRAID is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -CONFIG_SCSI_HISI_SAS=y -# CONFIG_SCSI_HISI_SAS_PCI is not set -# CONFIG_SCSI_MVSAS is not set -# CONFIG_SCSI_MVUMI is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -CONFIG_MEGARAID_SAS=y -# CONFIG_SCSI_MPT3SAS is not set -# CONFIG_SCSI_MPT2SAS is not set -CONFIG_SCSI_SMARTPQI=m -# CONFIG_SCSI_UFSHCD is not set -# CONFIG_SCSI_HPTIOP is not set -CONFIG_XEN_SCSI_FRONTEND=y -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_STEX is not set -# CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_IPR is not set -# CONFIG_SCSI_QLOGIC_1280 is not set -# CONFIG_SCSI_QLA_ISCSI is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -# CONFIG_SCSI_DEBUG is not set -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -CONFIG_SCSI_VIRTIO=y -# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set -# CONFIG_SCSI_DH is not set -# CONFIG_SCSI_OSD_INITIATOR is not set -CONFIG_HAVE_PATA_PLATFORM=y -CONFIG_ATA=y -# CONFIG_ATA_VERBOSE_ERROR is not set -CONFIG_ATA_ACPI=y -# CONFIG_SATA_ZPODD is not set -# CONFIG_SATA_PMP is not set - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=y -CONFIG_SATA_MOBILE_LPM_POLICY=0 -CONFIG_SATA_AHCI_PLATFORM=y -# CONFIG_AHCI_CEVA is not set -# CONFIG_AHCI_QORIQ is not set -CONFIG_SATA_AHCI_SEATTLE=y -# 
CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -# CONFIG_SATA_SIL24 is not set -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -# CONFIG_ATA_PIIX is not set -# CONFIG_SATA_DWC is not set -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI is not set -# CONFIG_PATA_AMD is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CYPRESS is not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# CONFIG_PATA_OLDPIIX is not set -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -CONFIG_PATA_SIS=y -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -CONFIG_PATA_PLATFORM=y -CONFIG_PATA_OF_PLATFORM=y -# CONFIG_PATA_RZ1000 is not set - -# -# Generic fallback / legacy drivers -# -CONFIG_PATA_ACPI=y -CONFIG_ATA_GENERIC=y -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -# CONFIG_BLK_DEV_MD is not set -# CONFIG_BCACHE is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=y -# CONFIG_DM_MQ_DEFAULT is not set -# CONFIG_DM_DEBUG is not set -CONFIG_DM_BUFIO=y -# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set -CONFIG_DM_BIO_PRISON=y -CONFIG_DM_PERSISTENT_DATA=y -# CONFIG_DM_UNSTRIPED is not set -CONFIG_DM_CRYPT=y -CONFIG_DM_SNAPSHOT=y -CONFIG_DM_THIN_PROVISIONING=y -# CONFIG_DM_CACHE is not set -# CONFIG_DM_WRITECACHE is not set -# CONFIG_DM_ERA is not set -# CONFIG_DM_MIRROR is not set -# CONFIG_DM_RAID is not set -# CONFIG_DM_ZERO is not set -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -# CONFIG_DM_DELAY is not set -# CONFIG_DM_UEVENT is not set -# CONFIG_DM_FLAKEY is not set -# CONFIG_DM_VERITY is not set -# CONFIG_DM_SWITCH is not set -# CONFIG_DM_LOG_WRITES is not set -# CONFIG_DM_INTEGRITY is not set -# CONFIG_TARGET_CORE is not set -CONFIG_FUSION=y -CONFIG_FUSION_SPI=y -# CONFIG_FUSION_SAS is not set -CONFIG_FUSION_MAX_SGE=128 -# CONFIG_FUSION_CTL is not set -# CONFIG_FUSION_LOGGING is not set - -# -# IEEE 1394 (FireWire) support -# -# CONFIG_FIREWIRE is not set -# CONFIG_FIREWIRE_NOSY is not set -CONFIG_NETDEVICES=y -CONFIG_MII=y -CONFIG_NET_CORE=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -# CONFIG_EQUALIZER is not set -# CONFIG_NET_FC is not set -# CONFIG_IFB is not set -CONFIG_NET_TEAM=m 
-CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_RANDOM=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_MACVLAN=y -CONFIG_MACVTAP=y -CONFIG_IPVLAN=y -# CONFIG_IPVTAP is not set -CONFIG_VXLAN=y -CONFIG_GENEVE=m -# CONFIG_GTP is not set -# CONFIG_MACSEC is not set -# CONFIG_NETCONSOLE is not set -CONFIG_TUN=y -CONFIG_TAP=y -# CONFIG_TUN_VNET_CROSS_LE is not set -CONFIG_VETH=y -CONFIG_VIRTIO_NET=y -CONFIG_NLMON=y -# CONFIG_NET_VRF is not set -# CONFIG_VSOCKMON is not set -# CONFIG_ARCNET is not set - -# -# CAIF transport drivers -# - -# -# Distributed Switch Architecture drivers -# -CONFIG_ETHERNET=y -CONFIG_MDIO=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_AGERE is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMAZON=y -# CONFIG_ENA_ETHERNET is not set -# CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_ATHEROS is not set -# CONFIG_NET_VENDOR_AURORA is not set -CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BCMGENET is not set -CONFIG_BNX2=m -CONFIG_CNIC=m -CONFIG_TIGON3=m -CONFIG_TIGON3_HWMON=y -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -# CONFIG_SYSTEMPORT is not set -CONFIG_BNXT=m -CONFIG_BNXT_SRIOV=y -CONFIG_BNXT_FLOWER_OFFLOAD=y -CONFIG_BNXT_HWMON=y -# CONFIG_NET_VENDOR_BROCADE is not set -# CONFIG_NET_VENDOR_CADENCE is not set -CONFIG_NET_VENDOR_CAVIUM=y -CONFIG_THUNDER_NIC_PF=m -CONFIG_THUNDER_NIC_VF=m -CONFIG_THUNDER_NIC_BGX=m -CONFIG_THUNDER_NIC_RGX=m -CONFIG_CAVIUM_PTP=y -# CONFIG_LIQUIDIO is not set -# CONFIG_LIQUIDIO_VF is not set -# CONFIG_NET_VENDOR_CHELSIO is not set -# CONFIG_NET_VENDOR_CISCO is not set -CONFIG_NET_VENDOR_CORTINA=y -# CONFIG_GEMINI_ETHERNET is not set -# CONFIG_DNET is not set -# CONFIG_NET_VENDOR_DEC is not set -# CONFIG_NET_VENDOR_DLINK is not set -# CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_FREESCALE is not set -CONFIG_NET_VENDOR_HISILICON=y -# CONFIG_HIX5HD2_GMAC is not set -# CONFIG_HISI_FEMAC is not set -# CONFIG_HIP04_ETH is not set -CONFIG_HNS_MDIO=m -CONFIG_HNS=m -CONFIG_HNS_DSAF=m -CONFIG_HNS_ENET=m -# CONFIG_HNS3 is not set -# CONFIG_NET_VENDOR_HP is not set -CONFIG_NET_VENDOR_HUAWEI=y -# CONFIG_HINIC is not set -# CONFIG_NET_VENDOR_I825XX is not set -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_IGB=m -CONFIG_IGB_HWMON=y -CONFIG_IGBVF=m -CONFIG_IXGB=m -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBEVF=m -CONFIG_I40E=m -CONFIG_I40EVF=m -CONFIG_ICE=m -# CONFIG_FM10K is not set -# CONFIG_JME is not set -# CONFIG_NET_VENDOR_MARVELL is not set -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX4_EN=m -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y -CONFIG_MLX4_CORE_GEN2=y -CONFIG_MLX5_CORE=m -# CONFIG_MLX5_FPGA is not set -CONFIG_MLX5_CORE_EN=y -CONFIG_MLX5_EN_ARFS=y -CONFIG_MLX5_EN_RXNFC=y -CONFIG_MLX5_MPFS=y -CONFIG_MLX5_ESWITCH=y -# CONFIG_MLX5_CORE_IPOIB is not set -# CONFIG_MLXSW_CORE is not set -# CONFIG_MLXFW is not set -# CONFIG_NET_VENDOR_MICREL is not set -CONFIG_NET_VENDOR_MICROSEMI=y -# CONFIG_MSCC_OCELOT_SWITCH is not set -# CONFIG_NET_VENDOR_MYRI is not set -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETERION is not set -CONFIG_NET_VENDOR_NETRONOME=y -CONFIG_NFP=m -CONFIG_NFP_APP_FLOWER=y 
-CONFIG_NFP_APP_ABM_NIC=y -# CONFIG_NFP_DEBUG is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -# CONFIG_NET_VENDOR_OKI is not set -# CONFIG_ETHOC is not set -# CONFIG_NET_VENDOR_PACKET_ENGINES is not set -# CONFIG_NET_VENDOR_QLOGIC is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RDC is not set -CONFIG_NET_VENDOR_REALTEK=y -CONFIG_8139CP=m -# CONFIG_8139TOO is not set -# CONFIG_R8169 is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -# CONFIG_NET_VENDOR_SMSC is not set -CONFIG_NET_VENDOR_SOCIONEXT=y -# CONFIG_SNI_NETSEC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_MDIO_DEVICE=m -CONFIG_MDIO_BUS=m -# CONFIG_MDIO_BCM_UNIMAC is not set -# CONFIG_MDIO_BITBANG is not set -# CONFIG_MDIO_BUS_MUX_GPIO is not set -# CONFIG_MDIO_BUS_MUX_MMIOREG is not set -CONFIG_MDIO_CAVIUM=m -# CONFIG_MDIO_HISI_FEMAC is not set -# CONFIG_MDIO_MSCC_MIIM is not set -# CONFIG_MDIO_OCTEON is not set -CONFIG_MDIO_THUNDER=m -CONFIG_PHYLIB=m -CONFIG_SWPHY=y - -# -# MII PHY device drivers -# -# CONFIG_AMD_PHY is not set -# CONFIG_AQUANTIA_PHY is not set -# CONFIG_AX88796B_PHY is not set -# CONFIG_AT803X_PHY is not set -# CONFIG_BCM7XXX_PHY is not set -# CONFIG_BCM87XX_PHY is not set -# CONFIG_BROADCOM_PHY is not set -# CONFIG_CICADA_PHY is not set -# CONFIG_CORTINA_PHY is not set -# CONFIG_DAVICOM_PHY is not set -# CONFIG_DP83822_PHY is not set -# CONFIG_DP83TC811_PHY is not set -# CONFIG_DP83848_PHY is not set -# CONFIG_DP83867_PHY is not set -CONFIG_FIXED_PHY=m -# CONFIG_ICPLUS_PHY is not set -# CONFIG_INTEL_XWAY_PHY is not set -# CONFIG_LSI_ET1011C_PHY is not set -# CONFIG_LXT_PHY is not set -# CONFIG_MARVELL_PHY is not set -# CONFIG_MARVELL_10G_PHY is not set -# CONFIG_MICREL_PHY is not set -CONFIG_MICROCHIP_PHY=m -# CONFIG_MICROCHIP_T1_PHY is not set -# CONFIG_MICROSEMI_PHY is not set -# CONFIG_NATIONAL_PHY is not set -# CONFIG_QSEMI_PHY is not set -# CONFIG_REALTEK_PHY is not set -# CONFIG_RENESAS_PHY is not set -# CONFIG_ROCKCHIP_PHY is not set -# CONFIG_SMSC_PHY is not set -# CONFIG_STE10XP is not set -# CONFIG_TERANETICS_PHY is not set -# CONFIG_VITESSE_PHY is not set -# CONFIG_XILINX_GMII2RGMII is not set -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=m -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -# CONFIG_SLIP is not set -CONFIG_SLHC=m -CONFIG_USB_NET_DRIVERS=y -# CONFIG_USB_CATC is not set -# CONFIG_USB_KAWETH is not set -# CONFIG_USB_PEGASUS is not set -# CONFIG_USB_RTL8150 is not set -# CONFIG_USB_RTL8152 is not set -CONFIG_USB_LAN78XX=m -CONFIG_USB_USBNET=y -# CONFIG_USB_NET_AX8817X is not set -# CONFIG_USB_NET_AX88179_178A is not set -# CONFIG_USB_NET_CDCETHER is not set -# CONFIG_USB_NET_CDC_EEM is not set -# CONFIG_USB_NET_CDC_NCM is not set -# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set -# CONFIG_USB_NET_CDC_MBIM is not set -# CONFIG_USB_NET_DM9601 is not set -# 
CONFIG_USB_NET_SR9700 is not set -# CONFIG_USB_NET_SR9800 is not set -# CONFIG_USB_NET_SMSC75XX is not set -CONFIG_USB_NET_SMSC95XX=m -# CONFIG_USB_NET_GL620A is not set -# CONFIG_USB_NET_NET1080 is not set -# CONFIG_USB_NET_PLUSB is not set -# CONFIG_USB_NET_MCS7830 is not set -# CONFIG_USB_NET_RNDIS_HOST is not set -# CONFIG_USB_NET_CDC_SUBSET is not set -# CONFIG_USB_NET_ZAURUS is not set -# CONFIG_USB_NET_CX82310_ETH is not set -# CONFIG_USB_NET_KALMIA is not set -# CONFIG_USB_NET_QMI_WWAN is not set -# CONFIG_USB_NET_INT51X1 is not set -# CONFIG_USB_IPHETH is not set -# CONFIG_USB_SIERRA_NET is not set -# CONFIG_USB_NET_CH9200 is not set -CONFIG_WLAN=y -# CONFIG_WIRELESS_WDS is not set -CONFIG_WLAN_VENDOR_ADMTEK=y -CONFIG_WLAN_VENDOR_ATH=y -# CONFIG_ATH_DEBUG is not set -# CONFIG_ATH5K_PCI is not set -CONFIG_WLAN_VENDOR_ATMEL=y -CONFIG_WLAN_VENDOR_BROADCOM=y -CONFIG_WLAN_VENDOR_CISCO=y -CONFIG_WLAN_VENDOR_INTEL=y -CONFIG_WLAN_VENDOR_INTERSIL=y -# CONFIG_HOSTAP is not set -# CONFIG_PRISM54 is not set -CONFIG_WLAN_VENDOR_MARVELL=y -CONFIG_WLAN_VENDOR_MEDIATEK=y -CONFIG_WLAN_VENDOR_RALINK=y -CONFIG_WLAN_VENDOR_REALTEK=y -CONFIG_WLAN_VENDOR_RSI=y -CONFIG_WLAN_VENDOR_ST=y -CONFIG_WLAN_VENDOR_TI=y -CONFIG_WLAN_VENDOR_ZYDAS=y -CONFIG_WLAN_VENDOR_QUANTENNA=y - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# -# CONFIG_WAN is not set -CONFIG_XEN_NETDEV_FRONTEND=y -# CONFIG_VMXNET3 is not set -# CONFIG_FUJITSU_ES is not set -# CONFIG_NETDEVSIM is not set -CONFIG_NET_FAILOVER=y -# CONFIG_ISDN is not set -# CONFIG_NVM is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_FF_MEMLESS=y -CONFIG_INPUT_POLLDEV=y -CONFIG_INPUT_SPARSEKMAP=y -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -CONFIG_INPUT_JOYDEV=y -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_DLINK_DIR685 is not set -# CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_GPIO is not set -# CONFIG_KEYBOARD_GPIO_POLLED is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_SAMSUNG is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_OMAP4 is not set -# CONFIG_KEYBOARD_XTKBD is not set -# CONFIG_KEYBOARD_CAP11XX is not set -# CONFIG_KEYBOARD_BCM is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -# CONFIG_INPUT_ATMEL_CAPTOUCH is not set -# CONFIG_INPUT_BMA150 is not set -# CONFIG_INPUT_E3X0_BUTTON is not set -# CONFIG_INPUT_MMA8450 is not set -# CONFIG_INPUT_GP2A is not set -# CONFIG_INPUT_GPIO_BEEPER is not set -# CONFIG_INPUT_GPIO_DECODER is not set -# CONFIG_INPUT_ATI_REMOTE2 is not set -# CONFIG_INPUT_KEYSPAN_REMOTE is not set -# CONFIG_INPUT_KXTJ9 is not set -# CONFIG_INPUT_POWERMATE is 
not set -# CONFIG_INPUT_YEALINK is not set -# CONFIG_INPUT_CM109 is not set -CONFIG_INPUT_UINPUT=y -# CONFIG_INPUT_PCF8574 is not set -# CONFIG_INPUT_PWM_BEEPER is not set -# CONFIG_INPUT_PWM_VIBRA is not set -# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_CMA3000 is not set -CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y -# CONFIG_INPUT_DRV260X_HAPTICS is not set -# CONFIG_INPUT_DRV2665_HAPTICS is not set -# CONFIG_INPUT_DRV2667_HAPTICS is not set -# CONFIG_INPUT_HISI_POWERKEY is not set -# CONFIG_RMI4_CORE is not set - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_SERIO_SERPORT=y -CONFIG_SERIO_AMBAKMI=y -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -# CONFIG_SERIO_RAW is not set -# CONFIG_SERIO_ALTERA_PS2 is not set -# CONFIG_SERIO_PS2MULT is not set -# CONFIG_SERIO_ARC_PS2 is not set -# CONFIG_SERIO_APBPS2 is not set -# CONFIG_SERIO_GPIO_PS2 is not set -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -# CONFIG_LEGACY_PTYS is not set -# CONFIG_SERIAL_NONSTANDARD is not set -# CONFIG_NOZOMI is not set -# CONFIG_N_GSM is not set -# CONFIG_TRACE_SINK is not set -CONFIG_LDISC_AUTOLOAD=y -CONFIG_DEVMEM=y - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y -CONFIG_SERIAL_8250_PNP=y -# CONFIG_SERIAL_8250_FINTEK is not set -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_DMA=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_EXAR=y -CONFIG_SERIAL_8250_NR_UARTS=4 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -# CONFIG_SERIAL_8250_MANY_PORTS is not set -# CONFIG_SERIAL_8250_ASPEED_VUART is not set -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -# CONFIG_SERIAL_8250_RSA is not set -CONFIG_SERIAL_8250_BCM2835AUX=y -CONFIG_SERIAL_8250_FSL=y -CONFIG_SERIAL_8250_DW=y -# CONFIG_SERIAL_8250_RT288X is not set -# CONFIG_SERIAL_8250_MOXA is not set -CONFIG_SERIAL_OF_PLATFORM=y - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_AMBA_PL010 is not set -CONFIG_SERIAL_AMBA_PL011=y -CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set -CONFIG_SERIAL_SAMSUNG=y -CONFIG_SERIAL_SAMSUNG_UARTS_4=y -CONFIG_SERIAL_SAMSUNG_UARTS=4 -CONFIG_SERIAL_SAMSUNG_CONSOLE=y -# CONFIG_SERIAL_UARTLITE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -# CONFIG_SERIAL_JSM is not set -# CONFIG_SERIAL_MSM is not set -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set -# CONFIG_SERIAL_ARC is not set -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set -CONFIG_SERIAL_DEV_BUS=y -CONFIG_SERIAL_DEV_CTRL_TTYPORT=y -# CONFIG_TTY_PRINTK is not set -CONFIG_HVC_DRIVER=y -CONFIG_HVC_IRQ=y -CONFIG_HVC_XEN=y -CONFIG_HVC_XEN_FRONTEND=y -# CONFIG_HVC_DCC is not set -CONFIG_VIRTIO_CONSOLE=y -# CONFIG_IPMI_HANDLER is not set -CONFIG_HW_RANDOM=y -# CONFIG_HW_RANDOM_TIMERIOMEM is not set -CONFIG_HW_RANDOM_BCM2835=y -CONFIG_HW_RANDOM_VIRTIO=y -CONFIG_HW_RANDOM_HISI=y -CONFIG_HW_RANDOM_CAVIUM=y -CONFIG_HW_RANDOM_EXYNOS=y -# CONFIG_APPLICOM is not set - -# -# PCMCIA character devices -# -# CONFIG_RAW_DRIVER is not set -CONFIG_TCG_TPM=y 
-CONFIG_HW_RANDOM_TPM=y -CONFIG_TCG_TIS_CORE=m -CONFIG_TCG_TIS=m -CONFIG_TCG_TIS_I2C_ATMEL=m -CONFIG_TCG_TIS_I2C_INFINEON=m -CONFIG_TCG_TIS_I2C_NUVOTON=m -CONFIG_TCG_ATMEL=m -CONFIG_TCG_INFINEON=m -CONFIG_TCG_XEN=m -CONFIG_TCG_CRB=y -CONFIG_TCG_VTPM_PROXY=m -CONFIG_TCG_TIS_ST33ZP24=m -CONFIG_TCG_TIS_ST33ZP24_I2C=m -CONFIG_DEVPORT=y -# CONFIG_XILLYBUS is not set - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_ACPI_I2C_OPREGION=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y - -# -# Multiplexer I2C Chip support -# -# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set -# CONFIG_I2C_MUX_GPIO is not set -# CONFIG_I2C_MUX_GPMUX is not set -# CONFIG_I2C_MUX_LTC4306 is not set -# CONFIG_I2C_MUX_PCA9541 is not set -# CONFIG_I2C_MUX_PCA954x is not set -# CONFIG_I2C_MUX_PINCTRL is not set -# CONFIG_I2C_MUX_REG is not set -# CONFIG_I2C_DEMUX_PINCTRL is not set -# CONFIG_I2C_MUX_MLXCPLD is not set -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_SMBUS=m -CONFIG_I2C_ALGOBIT=m - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -# CONFIG_I2C_HIX5HD2 is not set -# CONFIG_I2C_I801 is not set -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set - -# -# ACPI drivers -# -# CONFIG_I2C_SCMI is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -CONFIG_I2C_BCM2835=m -# CONFIG_I2C_CADENCE is not set -# CONFIG_I2C_CBUS_GPIO is not set -# CONFIG_I2C_DESIGNWARE_PLATFORM is not set -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_EMEV2 is not set -# CONFIG_I2C_EXYNOS5 is not set -# CONFIG_I2C_GPIO is not set -# CONFIG_I2C_IMX is not set -# CONFIG_I2C_NOMADIK is not set -# CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PCA_PLATFORM is not set -# CONFIG_I2C_QUP is not set -# CONFIG_I2C_RK3X is not set -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_SYNQUACER is not set -# CONFIG_I2C_VERSATILE is not set -CONFIG_I2C_THUNDERX=m -# CONFIG_I2C_XILINX is not set -# CONFIG_I2C_XLP9XX is not set - -# -# External I2C/SMBus adapter drivers -# -# CONFIG_I2C_DIOLAN_U2C is not set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_TINY_USB is not set - -# -# Other I2C/SMBus bus drivers -# -# CONFIG_I2C_STUB is not set -# CONFIG_I2C_SLAVE is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# CONFIG_SPI is not set -# CONFIG_SPMI is not set -# CONFIG_HSI is not set -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -# CONFIG_PPS_CLIENT_LDISC is not set -# CONFIG_PPS_CLIENT_GPIO is not set - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=y - -# -# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. 
-# -CONFIG_PINCTRL=y -CONFIG_PINMUX=y -CONFIG_PINCONF=y -CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_MCP23S08 is not set -# CONFIG_PINCTRL_SINGLE is not set -# CONFIG_PINCTRL_SX150X is not set -CONFIG_PINCTRL_BCM2835=y -# CONFIG_PINCTRL_APQ8064 is not set -# CONFIG_PINCTRL_APQ8084 is not set -# CONFIG_PINCTRL_IPQ4019 is not set -# CONFIG_PINCTRL_IPQ8064 is not set -# CONFIG_PINCTRL_IPQ8074 is not set -# CONFIG_PINCTRL_MSM8660 is not set -# CONFIG_PINCTRL_MSM8960 is not set -# CONFIG_PINCTRL_MDM9615 is not set -# CONFIG_PINCTRL_MSM8X74 is not set -# CONFIG_PINCTRL_MSM8916 is not set -# CONFIG_PINCTRL_MSM8994 is not set -# CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_MSM8998 is not set -# CONFIG_PINCTRL_QDF2XXX is not set -# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set -# CONFIG_PINCTRL_SDM845 is not set -CONFIG_PINCTRL_SAMSUNG=y -CONFIG_PINCTRL_EXYNOS=y -CONFIG_PINCTRL_EXYNOS_ARM64=y -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_FASTPATH_LIMIT=512 -CONFIG_OF_GPIO=y -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set -# CONFIG_GPIO_SYSFS is not set -CONFIG_GPIO_GENERIC=y - -# -# Memory mapped GPIO drivers -# -# CONFIG_GPIO_74XX_MMIO is not set -# CONFIG_GPIO_ALTERA is not set -# CONFIG_GPIO_AMDPT is not set -CONFIG_GPIO_RASPBERRYPI_EXP=y -# CONFIG_GPIO_DWAPB is not set -# CONFIG_GPIO_EXAR is not set -# CONFIG_GPIO_FTGPIO010 is not set -CONFIG_GPIO_GENERIC_PLATFORM=y -# CONFIG_GPIO_GRGPIO is not set -# CONFIG_GPIO_HLWD is not set -# CONFIG_GPIO_MB86S7X is not set -# CONFIG_GPIO_MOCKUP is not set -# CONFIG_GPIO_MPC8XXX is not set -# CONFIG_GPIO_PL061 is not set -# CONFIG_GPIO_SYSCON is not set -CONFIG_GPIO_THUNDERX=m -# CONFIG_GPIO_XGENE is not set -# CONFIG_GPIO_XILINX is not set -# CONFIG_GPIO_XLP is not set - -# -# I2C GPIO expanders -# -# CONFIG_GPIO_ADP5588 is not set -# CONFIG_GPIO_ADNP is not set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_TPIC2810 is not set - -# -# MFD GPIO expanders -# - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_PCIE_IDIO_24 is not set -# CONFIG_GPIO_RDC321X is not set - -# -# USB GPIO expanders -# -# CONFIG_W1 is not set -# CONFIG_POWER_AVS is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_BRCMSTB is not set -# CONFIG_POWER_RESET_GPIO is not set -# CONFIG_POWER_RESET_GPIO_RESTART is not set -# CONFIG_POWER_RESET_HISI is not set -# CONFIG_POWER_RESET_MSM is not set -# CONFIG_POWER_RESET_LTC2952 is not set -# CONFIG_POWER_RESET_RESTART is not set -CONFIG_POWER_RESET_VEXPRESS=y -# CONFIG_POWER_RESET_XGENE is not set -# CONFIG_POWER_RESET_SYSCON is not set -# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set -# CONFIG_SYSCON_REBOOT_MODE is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -# CONFIG_PDA_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_CHARGER_ADP5061 is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_CHARGER_SBS is not set -# CONFIG_MANAGER_SBS is not set -# CONFIG_BATTERY_BQ27XXX is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_ISP1704 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_LTC3651 is not set -# 
CONFIG_CHARGER_DETECTOR_MAX14656 is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24190 is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ25890 is not set -# CONFIG_CHARGER_SMB347 is not set -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_CHARGER_RT9455 is not set -CONFIG_HWMON=y -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -# CONFIG_SENSORS_AD7414 is not set -# CONFIG_SENSORS_AD7418 is not set -# CONFIG_SENSORS_ADM1021 is not set -# CONFIG_SENSORS_ADM1025 is not set -# CONFIG_SENSORS_ADM1026 is not set -# CONFIG_SENSORS_ADM1029 is not set -# CONFIG_SENSORS_ADM1031 is not set -# CONFIG_SENSORS_ADM9240 is not set -# CONFIG_SENSORS_ADT7410 is not set -# CONFIG_SENSORS_ADT7411 is not set -# CONFIG_SENSORS_ADT7462 is not set -# CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7475 is not set -# CONFIG_SENSORS_ASC7621 is not set -# CONFIG_SENSORS_ASPEED is not set -# CONFIG_SENSORS_ATXP1 is not set -# CONFIG_SENSORS_DS620 is not set -# CONFIG_SENSORS_DS1621 is not set -# CONFIG_SENSORS_I5K_AMB is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_F75375S is not set -# CONFIG_SENSORS_GL518SM is not set -# CONFIG_SENSORS_GL520SM is not set -# CONFIG_SENSORS_G760A is not set -# CONFIG_SENSORS_G762 is not set -# CONFIG_SENSORS_GPIO_FAN is not set -# CONFIG_SENSORS_HIH6130 is not set -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_JC42 is not set -# CONFIG_SENSORS_POWR1220 is not set -# CONFIG_SENSORS_LINEAGE is not set -# CONFIG_SENSORS_LTC2945 is not set -# CONFIG_SENSORS_LTC2990 is not set -# CONFIG_SENSORS_LTC4151 is not set -# CONFIG_SENSORS_LTC4215 is not set -# CONFIG_SENSORS_LTC4222 is not set -# CONFIG_SENSORS_LTC4245 is not set -# CONFIG_SENSORS_LTC4260 is not set -# CONFIG_SENSORS_LTC4261 is not set -# CONFIG_SENSORS_MAX16065 is not set -# CONFIG_SENSORS_MAX1619 is not set -# CONFIG_SENSORS_MAX1668 is not set -# CONFIG_SENSORS_MAX197 is not set -# CONFIG_SENSORS_MAX6621 is not set -# CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set -# CONFIG_SENSORS_MAX6650 is not set -# CONFIG_SENSORS_MAX6697 is not set -# CONFIG_SENSORS_MAX31790 is not set -# CONFIG_SENSORS_MCP3021 is not set -# CONFIG_SENSORS_TC654 is not set -# CONFIG_SENSORS_LM63 is not set -# CONFIG_SENSORS_LM73 is not set -# CONFIG_SENSORS_LM75 is not set -# CONFIG_SENSORS_LM77 is not set -# CONFIG_SENSORS_LM78 is not set -# CONFIG_SENSORS_LM80 is not set -# CONFIG_SENSORS_LM83 is not set -# CONFIG_SENSORS_LM85 is not set -# CONFIG_SENSORS_LM87 is not set -# CONFIG_SENSORS_LM90 is not set -# CONFIG_SENSORS_LM92 is not set -# CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LM95234 is not set -# CONFIG_SENSORS_LM95241 is not set -# CONFIG_SENSORS_LM95245 is not set -# CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_NTC_THERMISTOR is not set -# CONFIG_SENSORS_NCT6683 is not set -# CONFIG_SENSORS_NCT6775 is not set -# CONFIG_SENSORS_NCT7802 is not set -# CONFIG_SENSORS_NCT7904 is not set -# CONFIG_SENSORS_NPCM7XX is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_PMBUS is not set -# CONFIG_SENSORS_PWM_FAN is not set -CONFIG_SENSORS_RASPBERRYPI_HWMON=y -# CONFIG_SENSORS_SHT15 is not set -# CONFIG_SENSORS_SHT21 is not set -# CONFIG_SENSORS_SHT3x is not set -# CONFIG_SENSORS_SHTC1 is not set -# CONFIG_SENSORS_SIS5595 is not set -# CONFIG_SENSORS_DME1737 is not set -# CONFIG_SENSORS_EMC1403 is not set -# 
CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC6W201 is not set -# CONFIG_SENSORS_SMSC47M1 is not set -# CONFIG_SENSORS_SMSC47M192 is not set -# CONFIG_SENSORS_SMSC47B397 is not set -# CONFIG_SENSORS_STTS751 is not set -# CONFIG_SENSORS_SMM665 is not set -# CONFIG_SENSORS_ADC128D818 is not set -# CONFIG_SENSORS_ADS1015 is not set -# CONFIG_SENSORS_ADS7828 is not set -# CONFIG_SENSORS_AMC6821 is not set -# CONFIG_SENSORS_INA209 is not set -# CONFIG_SENSORS_INA2XX is not set -# CONFIG_SENSORS_INA3221 is not set -# CONFIG_SENSORS_TC74 is not set -# CONFIG_SENSORS_THMC50 is not set -# CONFIG_SENSORS_TMP102 is not set -# CONFIG_SENSORS_TMP103 is not set -# CONFIG_SENSORS_TMP108 is not set -# CONFIG_SENSORS_TMP401 is not set -# CONFIG_SENSORS_TMP421 is not set -# CONFIG_SENSORS_VEXPRESS is not set -# CONFIG_SENSORS_VIA686A is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set -# CONFIG_SENSORS_W83773G is not set -# CONFIG_SENSORS_W83781D is not set -# CONFIG_SENSORS_W83791D is not set -# CONFIG_SENSORS_W83792D is not set -# CONFIG_SENSORS_W83793 is not set -# CONFIG_SENSORS_W83795 is not set -# CONFIG_SENSORS_W83L785TS is not set -# CONFIG_SENSORS_W83L786NG is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set - -# -# ACPI drivers -# -# CONFIG_SENSORS_ACPI_POWER is not set -CONFIG_THERMAL=y -# CONFIG_THERMAL_STATISTICS is not set -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -# CONFIG_THERMAL_WRITABLE_TRIPS is not set -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set -# CONFIG_THERMAL_GOV_FAIR_SHARE is not set -CONFIG_THERMAL_GOV_STEP_WISE=y -# CONFIG_THERMAL_GOV_BANG_BANG is not set -# CONFIG_THERMAL_GOV_USER_SPACE is not set -# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set -# CONFIG_CPU_THERMAL is not set -# CONFIG_CLOCK_THERMAL is not set -# CONFIG_DEVFREQ_THERMAL is not set -CONFIG_THERMAL_EMULATION=y -CONFIG_HISI_THERMAL=y -# CONFIG_QORIQ_THERMAL is not set - -# -# ACPI INT340X thermal drivers -# - -# -# Broadcom thermal drivers -# -# CONFIG_BCM2835_THERMAL is not set - -# -# Samsung thermal drivers -# -CONFIG_EXYNOS_THERMAL=y - -# -# Qualcomm thermal drivers -# -# CONFIG_WATCHDOG is not set -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y -# CONFIG_BCMA is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=y -# CONFIG_MFD_ACT8945A is not set -# CONFIG_MFD_AS3711 is not set -# CONFIG_MFD_AS3722 is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_ATMEL_FLEXCOM is not set -# CONFIG_MFD_ATMEL_HLCDC is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_BD9571MWV is not set -# CONFIG_MFD_AXP20X_I2C is not set -# CONFIG_MFD_CROS_EC is not set -# CONFIG_MFD_MADERA is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_EXYNOS_LPASS is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_MFD_HI6421_PMIC is not set -# CONFIG_MFD_HI655X_PMIC is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_HTC_I2CPLD is not set -# CONFIG_LPC_ICH is not set -# CONFIG_LPC_SCH is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# 
CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77620 is not set -# CONFIG_MFD_MAX77686 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX77843 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_MFD_VIPERBOARD is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_QCOM_RPM is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RT5033 is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RK808 is not set -# CONFIG_MFD_RN5T618 is not set -# CONFIG_MFD_SEC_CORE is not set -# CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_MFD_SKY81452 is not set -# CONFIG_MFD_SMSC is not set -# CONFIG_ABX500_CORE is not set -# CONFIG_MFD_STMPE is not set -CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TI_AM335X_TSCADC is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_TI_LMU is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65086 is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TPS65217 is not set -# CONFIG_MFD_TI_LP873X is not set -# CONFIG_MFD_TI_LP87565 is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS80031 is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_ROHM_BD718XX is not set -CONFIG_MFD_VEXPRESS_SYSREG=y -# CONFIG_RAVE_SP_CORE is not set -# CONFIG_REGULATOR is not set -# CONFIG_RC_CORE is not set -# CONFIG_MEDIA_SUPPORT is not set - -# -# Graphics support -# -# CONFIG_VGA_ARB is not set -# CONFIG_DRM is not set -# CONFIG_DRM_DP_CEC is not set - -# -# ACP (Audio CoProcessor) Configuration -# - -# -# AMD Library routines -# -# CONFIG_DRM_XEN is not set -CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y - -# -# Frame buffer Devices -# -CONFIG_FB_CMDLINE=y -CONFIG_FB_NOTIFY=y -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_SYS_FILLRECT=y -CONFIG_FB_SYS_COPYAREA=y -CONFIG_FB_SYS_IMAGEBLIT=y -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=y -CONFIG_FB_DEFERRED_IO=y -# CONFIG_FB_MODE_HELPERS is not set -# CONFIG_FB_TILEBLITTING is not set - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_ARMCLCD is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -CONFIG_FB_EFI=y -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO 
is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -CONFIG_XEN_FBDEV_FRONTEND=y -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set -CONFIG_FB_SIMPLE=y -# CONFIG_FB_SSD1307 is not set -# CONFIG_FB_SM712 is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set - -# -# Console display driver support -# -CONFIG_DUMMY_CONSOLE=y -CONFIG_DUMMY_CONSOLE_COLUMNS=80 -CONFIG_DUMMY_CONSOLE_ROWS=25 -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set -# CONFIG_LOGO is not set -# CONFIG_SOUND is not set - -# -# HID support -# -CONFIG_HID=y -# CONFIG_HID_BATTERY_STRENGTH is not set -# CONFIG_HIDRAW is not set -# CONFIG_UHID is not set -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# -# CONFIG_HID_A4TECH is not set -# CONFIG_HID_ACCUTOUCH is not set -# CONFIG_HID_ACRUX is not set -# CONFIG_HID_APPLE is not set -# CONFIG_HID_APPLEIR is not set -# CONFIG_HID_AUREAL is not set -# CONFIG_HID_BELKIN is not set -# CONFIG_HID_BETOP_FF is not set -# CONFIG_HID_CHERRY is not set -# CONFIG_HID_CHICONY is not set -# CONFIG_HID_COUGAR is not set -# CONFIG_HID_CMEDIA is not set -# CONFIG_HID_CYPRESS is not set -# CONFIG_HID_DRAGONRISE is not set -# CONFIG_HID_EMS_FF is not set -# CONFIG_HID_ELECOM is not set -# CONFIG_HID_ELO is not set -# CONFIG_HID_EZKEY is not set -# CONFIG_HID_GEMBIRD is not set -# CONFIG_HID_GFRM is not set -# CONFIG_HID_HOLTEK is not set -# CONFIG_HID_KEYTOUCH is not set -# CONFIG_HID_KYE is not set -# CONFIG_HID_UCLOGIC is not set -# CONFIG_HID_WALTOP is not set -# CONFIG_HID_GYRATION is not set -# CONFIG_HID_ICADE is not set -# CONFIG_HID_ITE is not set -# CONFIG_HID_JABRA is not set -# CONFIG_HID_TWINHAN is not set -# CONFIG_HID_KENSINGTON is not set -# CONFIG_HID_LCPOWER is not set -# CONFIG_HID_LENOVO is not set -# CONFIG_HID_LOGITECH is not set -# CONFIG_HID_MAGICMOUSE is not set -# CONFIG_HID_MAYFLASH is not set -# CONFIG_HID_REDRAGON is not set -# CONFIG_HID_MICROSOFT is not set -# CONFIG_HID_MONTEREY is not set -# CONFIG_HID_MULTITOUCH is not set -# CONFIG_HID_NTI is not set -# CONFIG_HID_NTRIG is not set -# CONFIG_HID_ORTEK is not set -# CONFIG_HID_PANTHERLORD is not set -# CONFIG_HID_PENMOUNT is not set -# CONFIG_HID_PETALYNX is not set -# CONFIG_HID_PICOLCD is not set -# CONFIG_HID_PLANTRONICS is not set -# CONFIG_HID_PRIMAX is not set -# CONFIG_HID_RETRODE is not set -# CONFIG_HID_ROCCAT is not set -# CONFIG_HID_SAITEK is not set -# CONFIG_HID_SAMSUNG is not set -# CONFIG_HID_SPEEDLINK is not set -# CONFIG_HID_STEAM is not set -# CONFIG_HID_STEELSERIES is not set -# CONFIG_HID_SUNPLUS is not set -# CONFIG_HID_RMI is not set -# CONFIG_HID_GREENASIA is not set -# CONFIG_HID_SMARTJOYPLUS is not set -# CONFIG_HID_TIVO is not set -# CONFIG_HID_TOPSEED is not set -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_UDRAW_PS3 is not set -# CONFIG_HID_WACOM is not set -# CONFIG_HID_XINMO is not set -# CONFIG_HID_ZEROPLUS is not set -# CONFIG_HID_ZYDACRON is not set -# CONFIG_HID_SENSOR_HUB is not set -# CONFIG_HID_ALPS is not set - -# -# USB HID support -# -CONFIG_USB_HID=y -# CONFIG_HID_PID is not set -# 
CONFIG_USB_HIDDEV is not set - -# -# I2C HID support -# -# CONFIG_I2C_HID is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_PCI=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_DYNAMIC_MINORS is not set -CONFIG_USB_OTG=y -# CONFIG_USB_OTG_WHITELIST is not set -# CONFIG_USB_OTG_BLACKLIST_HUB is not set -# CONFIG_USB_OTG_FSM is not set -# CONFIG_USB_MON is not set -# CONFIG_USB_WUSB_CBAF is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=m -# CONFIG_USB_XHCI_DBGCAP is not set -CONFIG_USB_XHCI_PCI=m -# CONFIG_USB_XHCI_PLATFORM is not set -CONFIG_USB_EHCI_HCD=m -# CONFIG_USB_EHCI_ROOT_HUB_TT is not set -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=m -# CONFIG_USB_EHCI_EXYNOS is not set -# CONFIG_USB_EHCI_HCD_PLATFORM is not set -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_FOTG210_HCD is not set -CONFIG_USB_OHCI_HCD=m -CONFIG_USB_OHCI_HCD_PCI=m -# CONFIG_USB_OHCI_EXYNOS is not set -# CONFIG_USB_OHCI_HCD_PLATFORM is not set -CONFIG_USB_UHCI_HCD=m -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_TEST_MODE is not set - -# -# USB Device Class drivers -# -# CONFIG_USB_ACM is not set -# CONFIG_USB_PRINTER is not set -# CONFIG_USB_WDM is not set -# CONFIG_USB_TMC is not set - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -# CONFIG_USB_STORAGE_REALTEK is not set -# CONFIG_USB_STORAGE_DATAFAB is not set -# CONFIG_USB_STORAGE_FREECOM is not set -# CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_USBAT is not set -# CONFIG_USB_STORAGE_SDDR09 is not set -# CONFIG_USB_STORAGE_SDDR55 is not set -# CONFIG_USB_STORAGE_JUMPSHOT is not set -# CONFIG_USB_STORAGE_ALAUDA is not set -# CONFIG_USB_STORAGE_ONETOUCH is not set -# CONFIG_USB_STORAGE_KARMA is not set -# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set -# CONFIG_USB_STORAGE_ENE_UB6250 is not set -# CONFIG_USB_UAS is not set - -# -# USB Imaging devices -# -# CONFIG_USB_MDC800 is not set -# CONFIG_USB_MICROTEK is not set -# CONFIG_USBIP_CORE is not set -# CONFIG_USB_MUSB_HDRC is not set -# CONFIG_USB_DWC3 is not set -CONFIG_USB_DWC2=m -CONFIG_USB_DWC2_HOST=y - -# -# Gadget/Dual-role mode requires USB Gadget support to be enabled -# -# CONFIG_USB_DWC2_PCI is not set -# CONFIG_USB_DWC2_DEBUG is not set -# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set -# CONFIG_USB_CHIPIDEA is not set -# CONFIG_USB_ISP1760 is not set - -# -# USB port drivers -# -# CONFIG_USB_SERIAL is not set - -# -# USB Miscellaneous drivers -# -# CONFIG_USB_EMI62 is not set -# CONFIG_USB_EMI26 is not set -# CONFIG_USB_ADUTUX is not set -# CONFIG_USB_SEVSEG is not set -# CONFIG_USB_LEGOTOWER is not set -# CONFIG_USB_LCD is not set -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -# CONFIG_USB_IDMOUSE is not set -# CONFIG_USB_FTDI_ELAN is not set -# CONFIG_USB_APPLEDISPLAY is not set -# CONFIG_USB_SISUSBVGA is not set -# CONFIG_USB_LD is not set -# CONFIG_USB_TRANCEVIBRATOR is not set -# CONFIG_USB_IOWARRIOR is not set -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -# CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_YUREX is not set -# CONFIG_USB_EZUSB_FX2 is not set -# CONFIG_USB_HUB_USB251XB is not set 
-# CONFIG_USB_HSIC_USB3503 is not set -# CONFIG_USB_HSIC_USB4604 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -# CONFIG_USB_CHAOSKEY is not set - -# -# USB Physical Layer drivers -# -CONFIG_USB_PHY=y -CONFIG_NOP_USB_XCEIV=m -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -# CONFIG_USB_ULPI is not set -# CONFIG_USB_GADGET is not set -# CONFIG_TYPEC is not set -# CONFIG_USB_ROLE_SWITCH is not set -# CONFIG_USB_ULPI_BUS is not set -# CONFIG_UWB is not set -CONFIG_MMC=m -CONFIG_PWRSEQ_EMMC=m -CONFIG_PWRSEQ_SIMPLE=m -CONFIG_MMC_BLOCK=m -CONFIG_MMC_BLOCK_MINORS=8 -# CONFIG_SDIO_UART is not set -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -# CONFIG_MMC_DEBUG is not set -# CONFIG_MMC_ARMMMCI is not set -CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_IO_ACCESSORS=y -# CONFIG_MMC_SDHCI_PCI is not set -# CONFIG_MMC_SDHCI_ACPI is not set -CONFIG_MMC_SDHCI_PLTFM=m -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -# CONFIG_MMC_SDHCI_OF_AT91 is not set -# CONFIG_MMC_SDHCI_OF_ESDHC is not set -# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set -# CONFIG_MMC_SDHCI_CADENCE is not set -# CONFIG_MMC_SDHCI_F_SDH30 is not set -CONFIG_MMC_SDHCI_IPROC=m -# CONFIG_MMC_SDHCI_MSM is not set -# CONFIG_MMC_TIFM_SD is not set -# CONFIG_MMC_CB710 is not set -# CONFIG_MMC_VIA_SDMMC is not set -# CONFIG_MMC_CAVIUM_THUNDERX is not set -# CONFIG_MMC_DW is not set -# CONFIG_MMC_VUB300 is not set -# CONFIG_MMC_USHC is not set -# CONFIG_MMC_USDHI6ROL0 is not set -# CONFIG_MMC_CQHCI is not set -# CONFIG_MMC_TOSHIBA_PCI is not set -CONFIG_MMC_BCM2835=m -# CONFIG_MMC_MTK is not set -# CONFIG_MMC_SDHCI_XENON is not set -# CONFIG_MMC_SDHCI_OMAP is not set -# CONFIG_MEMSTICK is not set -# CONFIG_NEW_LEDS is not set -# CONFIG_ACCESSIBILITY is not set -# CONFIG_INFINIBAND is not set -CONFIG_EDAC_SUPPORT=y -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set -CONFIG_RTC_NVMEM=y - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_ABB5ZES3 is not set -# CONFIG_RTC_DRV_ABX80X is not set -# CONFIG_RTC_DRV_DS1307 is not set -# CONFIG_RTC_DRV_DS1374 is not set -# CONFIG_RTC_DRV_DS1672 is not set -# CONFIG_RTC_DRV_HYM8563 is not set -# CONFIG_RTC_DRV_MAX6900 is not set -# CONFIG_RTC_DRV_RS5C372 is not set -# CONFIG_RTC_DRV_ISL1208 is not set -# CONFIG_RTC_DRV_ISL12022 is not set -# CONFIG_RTC_DRV_ISL12026 is not set -# CONFIG_RTC_DRV_X1205 is not set -# CONFIG_RTC_DRV_PCF8523 is not set -# CONFIG_RTC_DRV_PCF85063 is not set -# CONFIG_RTC_DRV_PCF85363 is not set -# CONFIG_RTC_DRV_PCF8563 is not set -# CONFIG_RTC_DRV_PCF8583 is not set -# CONFIG_RTC_DRV_M41T80 is not set -# CONFIG_RTC_DRV_BQ32K is not set -# CONFIG_RTC_DRV_S35390A is not set -# CONFIG_RTC_DRV_FM3130 is not set -# CONFIG_RTC_DRV_RX8010 is not set -# CONFIG_RTC_DRV_RX8581 is not set -# CONFIG_RTC_DRV_RX8025 is not set -# CONFIG_RTC_DRV_EM3027 is not set -# CONFIG_RTC_DRV_RV8803 is not set - -# -# SPI RTC drivers -# -CONFIG_RTC_I2C_AND_SPI=y - -# -# SPI and I2C RTC drivers -# -CONFIG_RTC_DRV_DS3232=y -CONFIG_RTC_DRV_DS3232_HWMON=y -# CONFIG_RTC_DRV_PCF2127 is not set -# CONFIG_RTC_DRV_RV3029C2 is not set - -# -# Platform RTC drivers -# -# CONFIG_RTC_DRV_DS1286 is not set -# CONFIG_RTC_DRV_DS1511 is not set -# CONFIG_RTC_DRV_DS1553 is not set -# 
CONFIG_RTC_DRV_DS1685_FAMILY is not set -# CONFIG_RTC_DRV_DS1742 is not set -# CONFIG_RTC_DRV_DS2404 is not set -CONFIG_RTC_DRV_EFI=y -# CONFIG_RTC_DRV_STK17TA8 is not set -# CONFIG_RTC_DRV_M48T86 is not set -# CONFIG_RTC_DRV_M48T35 is not set -# CONFIG_RTC_DRV_M48T59 is not set -# CONFIG_RTC_DRV_MSM6242 is not set -# CONFIG_RTC_DRV_BQ4802 is not set -# CONFIG_RTC_DRV_RP5C01 is not set -# CONFIG_RTC_DRV_V3020 is not set -# CONFIG_RTC_DRV_ZYNQMP is not set - -# -# on-CPU RTC drivers -# -CONFIG_HAVE_S3C_RTC=y -CONFIG_RTC_DRV_S3C=y -# CONFIG_RTC_DRV_PL030 is not set -CONFIG_RTC_DRV_PL031=y -# CONFIG_RTC_DRV_FTRTC010 is not set -# CONFIG_RTC_DRV_SNVS is not set -# CONFIG_RTC_DRV_R7301 is not set - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_VIRTUAL_CHANNELS=m -CONFIG_DMA_ACPI=y -CONFIG_DMA_OF=y -# CONFIG_ALTERA_MSGDMA is not set -# CONFIG_AMBA_PL08X is not set -# CONFIG_BCM_SBA_RAID is not set -CONFIG_DMA_BCM2835=m -# CONFIG_DW_AXI_DMAC is not set -# CONFIG_FSL_EDMA is not set -# CONFIG_INTEL_IDMA64 is not set -# CONFIG_K3_DMA is not set -# CONFIG_MV_XOR_V2 is not set -# CONFIG_PL330_DMA is not set -# CONFIG_XILINX_DMA is not set -# CONFIG_XILINX_ZYNQMP_DMA is not set -# CONFIG_QCOM_BAM_DMA is not set -# CONFIG_QCOM_HIDMA_MGMT is not set -# CONFIG_QCOM_HIDMA is not set -# CONFIG_DW_DMAC is not set -# CONFIG_DW_DMAC_PCI is not set - -# -# DMA Clients -# -# CONFIG_ASYNC_TX_DMA is not set -# CONFIG_DMATEST is not set - -# -# DMABUF options -# -# CONFIG_SYNC_FILE is not set -# CONFIG_AUXDISPLAY is not set -CONFIG_UIO=m -# CONFIG_UIO_CIF is not set -# CONFIG_UIO_PDRV_GENIRQ is not set -# CONFIG_UIO_DMEM_GENIRQ is not set -# CONFIG_UIO_AEC is not set -# CONFIG_UIO_SERCOS3 is not set -# CONFIG_UIO_PCI_GENERIC is not set -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set -# CONFIG_VIRT_DRIVERS is not set -CONFIG_VIRTIO=y -CONFIG_VIRTIO_MENU=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_PCI_LEGACY=y -CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_INPUT=y -CONFIG_VIRTIO_MMIO=y -CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y - -# -# Microsoft Hyper-V guest support -# - -# -# Xen driver support -# -CONFIG_XEN_BALLOON=y -CONFIG_XEN_SCRUB_PAGES_DEFAULT=y -CONFIG_XEN_DEV_EVTCHN=y -# CONFIG_XEN_BACKEND is not set -CONFIG_XENFS=y -CONFIG_XEN_COMPAT_XENFS=y -CONFIG_XEN_SYS_HYPERVISOR=y -CONFIG_XEN_XENBUS_FRONTEND=y -CONFIG_XEN_GNTDEV=y -CONFIG_XEN_GRANT_DEV_ALLOC=y -# CONFIG_XEN_GRANT_DMA_ALLOC is not set -CONFIG_SWIOTLB_XEN=y -CONFIG_XEN_PVCALLS_FRONTEND=y -CONFIG_XEN_PRIVCMD=y -CONFIG_XEN_EFI=y -CONFIG_XEN_AUTO_XLATE=y -# CONFIG_STAGING is not set -# CONFIG_GOLDFISH is not set -# CONFIG_CHROME_PLATFORMS is not set -CONFIG_CLKDEV_LOOKUP=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK=y - -# -# Common Clock Framework -# -CONFIG_COMMON_CLK_VERSATILE=y -CONFIG_CLK_SP810=y -CONFIG_CLK_VEXPRESS_OSC=y -# CONFIG_CLK_HSDK is not set -# CONFIG_COMMON_CLK_MAX9485 is not set -# CONFIG_COMMON_CLK_SI5351 is not set -# CONFIG_COMMON_CLK_SI514 is not set -# CONFIG_COMMON_CLK_SI544 is not set -# CONFIG_COMMON_CLK_SI570 is not set -# CONFIG_COMMON_CLK_CDCE706 is not set -# CONFIG_COMMON_CLK_CDCE925 is not set -# CONFIG_COMMON_CLK_CS2000_CP is not set -# CONFIG_CLK_QORIQ is not set -# CONFIG_COMMON_CLK_XGENE is not set -# CONFIG_COMMON_CLK_PWM is not set -# CONFIG_COMMON_CLK_VC5 is not set -CONFIG_COMMON_CLK_HI3516CV300=y -# CONFIG_COMMON_CLK_HI3519 is not set 
-CONFIG_COMMON_CLK_HI3660=y -CONFIG_COMMON_CLK_HI3798CV200=y -# CONFIG_COMMON_CLK_HI6220 is not set -CONFIG_RESET_HISI=y -# CONFIG_STUB_CLK_HI3660 is not set -# CONFIG_COMMON_CLK_QCOM is not set -CONFIG_COMMON_CLK_SAMSUNG=y -CONFIG_EXYNOS_ARM64_COMMON_CLK=y -# CONFIG_EXYNOS_AUDSS_CLK_CON is not set -# CONFIG_HWSPINLOCK is not set - -# -# Clock Source drivers -# -CONFIG_TIMER_OF=y -CONFIG_TIMER_ACPI=y -CONFIG_TIMER_PROBE=y -CONFIG_CLKSRC_MMIO=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y -CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y -CONFIG_FSL_ERRATUM_A008585=y -CONFIG_HISILICON_ERRATUM_161010101=y -CONFIG_ARM64_ERRATUM_858921=y -CONFIG_ARM_TIMER_SP804=y -CONFIG_CLKSRC_VERSATILE=y -CONFIG_MAILBOX=y -# CONFIG_ARM_MHU is not set -# CONFIG_PLATFORM_MHU is not set -# CONFIG_PL320_MBOX is not set -# CONFIG_PCC is not set -# CONFIG_ALTERA_MBOX is not set -CONFIG_BCM2835_MBOX=y -# CONFIG_HI3660_MBOX is not set -# CONFIG_HI6220_MBOX is not set -# CONFIG_MAILBOX_TEST is not set -# CONFIG_QCOM_APCS_IPC is not set -# CONFIG_IOMMU_SUPPORT is not set - -# -# Remoteproc drivers -# -# CONFIG_REMOTEPROC is not set - -# -# Rpmsg drivers -# -# CONFIG_RPMSG_QCOM_GLINK_RPM is not set -# CONFIG_RPMSG_VIRTIO is not set -# CONFIG_SOUNDWIRE is not set - -# -# SOC (System On Chip) specific Drivers -# - -# -# Amlogic SoC drivers -# - -# -# Broadcom SoC drivers -# -CONFIG_RASPBERRYPI_POWER=y -# CONFIG_SOC_BRCMSTB is not set - -# -# NXP/Freescale QorIQ SoC drivers -# -# CONFIG_FSL_DPAA is not set - -# -# i.MX SoC drivers -# - -# -# Qualcomm SoC drivers -# -# CONFIG_QCOM_COMMAND_DB is not set -# CONFIG_QCOM_GENI_SE is not set -# CONFIG_QCOM_GSBI is not set -# CONFIG_QCOM_LLCC is not set -# CONFIG_QCOM_RMTFS_MEM is not set -# CONFIG_QCOM_RPMH is not set -CONFIG_SOC_SAMSUNG=y -CONFIG_EXYNOS_PMU=y -CONFIG_EXYNOS_PM_DOMAINS=y -# CONFIG_SOC_TI is not set - -# -# Xilinx SoC drivers -# -# CONFIG_XILINX_VCU is not set -CONFIG_PM_DEVFREQ=y - -# -# DEVFREQ Governors -# -CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y -# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set -# CONFIG_DEVFREQ_GOV_POWERSAVE is not set -# CONFIG_DEVFREQ_GOV_USERSPACE is not set -# CONFIG_DEVFREQ_GOV_PASSIVE is not set - -# -# DEVFREQ Drivers -# -# CONFIG_ARM_EXYNOS_BUS_DEVFREQ is not set -# CONFIG_PM_DEVFREQ_EVENT is not set -CONFIG_EXTCON=y - -# -# Extcon Device Drivers -# -# CONFIG_EXTCON_GPIO is not set -# CONFIG_EXTCON_MAX3355 is not set -# CONFIG_EXTCON_QCOM_SPMI_MISC is not set -# CONFIG_EXTCON_RT8973A is not set -# CONFIG_EXTCON_SM5502 is not set -# CONFIG_EXTCON_USB_GPIO is not set -# CONFIG_MEMORY is not set -# CONFIG_IIO is not set -# CONFIG_NTB is not set -# CONFIG_VME_BUS is not set -CONFIG_PWM=y -CONFIG_PWM_SYSFS=y -CONFIG_PWM_BCM2835=m -# CONFIG_PWM_FSL_FTM is not set -# CONFIG_PWM_HIBVT is not set -# CONFIG_PWM_PCA9685 is not set -# CONFIG_PWM_SAMSUNG is not set - -# -# IRQ chip support -# -CONFIG_IRQCHIP=y -CONFIG_ARM_GIC=y -CONFIG_ARM_GIC_MAX_NR=1 -CONFIG_ARM_GIC_V2M=y -CONFIG_ARM_GIC_V3=y -CONFIG_ARM_GIC_V3_ITS=y -CONFIG_ARM_GIC_V3_ITS_PCI=y -CONFIG_HISILICON_IRQ_MBIGEN=y -CONFIG_LS_SCFG_MSI=y -CONFIG_PARTITION_PERCPU=y -# CONFIG_QCOM_IRQ_COMBINER is not set -# CONFIG_QCOM_PDC is not set -# CONFIG_IPACK_BUS is not set -CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_QCOM_AOSS is not set -# CONFIG_RESET_TI_SYSCON is not set -CONFIG_COMMON_RESET_HI3660=y -# CONFIG_COMMON_RESET_HI6220 is not set -# CONFIG_FMC is not set - -# -# PHY Subsystem -# -CONFIG_GENERIC_PHY=y -# CONFIG_PHY_XGENE is not set -# CONFIG_BCM_KONA_USB2_PHY is not set -# 
CONFIG_PHY_HI6220_USB is not set -# CONFIG_PHY_HISTB_COMBPHY is not set -# CONFIG_PHY_HISI_INNO_USB2 is not set -# CONFIG_PHY_PXA_28NM_HSIC is not set -# CONFIG_PHY_PXA_28NM_USB2 is not set -# CONFIG_PHY_MAPPHONE_MDM6600 is not set -# CONFIG_PHY_QCOM_APQ8064_SATA is not set -# CONFIG_PHY_QCOM_IPQ806X_SATA is not set -# CONFIG_PHY_QCOM_QMP is not set -# CONFIG_PHY_QCOM_QUSB2 is not set -# CONFIG_PHY_QCOM_UFS is not set -# CONFIG_PHY_EXYNOS_DP_VIDEO is not set -# CONFIG_PHY_EXYNOS_MIPI_VIDEO is not set -# CONFIG_PHY_EXYNOS_PCIE is not set -# CONFIG_PHY_SAMSUNG_USB2 is not set -# CONFIG_POWERCAP is not set -# CONFIG_MCB is not set - -# -# Performance monitor support -# -CONFIG_ARM_CCI_PMU=y -CONFIG_ARM_CCI400_PMU=y -CONFIG_ARM_CCI5xx_PMU=y -CONFIG_ARM_CCN=y -CONFIG_ARM_PMU=y -CONFIG_ARM_PMU_ACPI=y -# CONFIG_ARM_DSU_PMU is not set -# CONFIG_HISI_PMU is not set -# CONFIG_QCOM_L2_PMU is not set -# CONFIG_QCOM_L3_PMU is not set -# CONFIG_ARM_SPE_PMU is not set -# CONFIG_RAS is not set - -# -# Android -# -# CONFIG_ANDROID is not set -# CONFIG_LIBNVDIMM is not set -CONFIG_DAX=y -# CONFIG_DEV_DAX is not set -CONFIG_NVMEM=y -# CONFIG_QCOM_QFPROM is not set - -# -# HW tracing support -# -# CONFIG_STM is not set -# CONFIG_INTEL_TH is not set -# CONFIG_FPGA is not set -# CONFIG_FSI is not set -# CONFIG_TEE is not set -CONFIG_PM_OPP=y -# CONFIG_SIOX is not set -# CONFIG_SLIMBUS is not set - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -CONFIG_FS_IOMAP=y -# CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set -CONFIG_EXT4_FS=y -CONFIG_EXT4_USE_FOR_EXT2=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_ENCRYPTION is not set -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -CONFIG_XFS_FS=y -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -# CONFIG_XFS_RT is not set -# CONFIG_XFS_ONLINE_SCRUB is not set -# CONFIG_XFS_WARN is not set -# CONFIG_XFS_DEBUG is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=m -CONFIG_BTRFS_FS_POSIX_ACL=y -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -# CONFIG_BTRFS_FS_REF_VERIFY is not set -# CONFIG_NILFS2_FS is not set -# CONFIG_F2FS_FS is not set -# CONFIG_FS_DAX is not set -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -# CONFIG_EXPORTFS_BLOCK_OPS is not set -CONFIG_FILE_LOCKING=y -CONFIG_MANDATORY_FILE_LOCKING=y -CONFIG_FS_ENCRYPTION=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_PRINT_QUOTA_WARNING=y -# CONFIG_QUOTA_DEBUG is not set -# CONFIG_QFMT_V1 is not set -# CONFIG_QFMT_V2 is not set -CONFIG_QUOTACTL=y -# CONFIG_AUTOFS4_FS is not set -# CONFIG_AUTOFS_FS is not set -CONFIG_FUSE_FS=y -CONFIG_CUSE=y -CONFIG_OVERLAY_FS=y -# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set -CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y -# CONFIG_OVERLAY_FS_INDEX is not set -# CONFIG_OVERLAY_FS_XINO_AUTO is not set -# CONFIG_OVERLAY_FS_METACOPY is not set - -# -# Caches -# -CONFIG_FSCACHE=y -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_HISTOGRAM is not set -# CONFIG_FSCACHE_DEBUG is not set -# CONFIG_FSCACHE_OBJECT_LIST is not set -CONFIG_CACHEFILES=y -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_HISTOGRAM is not set - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=y 
-CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=y - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="utf8" -# CONFIG_FAT_DEFAULT_UTF8 is not set -CONFIG_NTFS_FS=m -# CONFIG_NTFS_DEBUG is not set -# CONFIG_NTFS_RW is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_PROC_CHILDREN=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -# CONFIG_TMPFS_POSIX_ACL is not set -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_MEMFD_CREATE=y -CONFIG_CONFIGFS_FS=y -CONFIG_EFIVAR_FS=y -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ORANGEFS_FS is not set -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_CRAMFS is not set -CONFIG_SQUASHFS=y -CONFIG_SQUASHFS_FILE_CACHE=y -# CONFIG_SQUASHFS_FILE_DIRECT is not set -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_ZSTD is not set -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -# CONFIG_ROMFS_FS is not set -CONFIG_PSTORE=y -CONFIG_PSTORE_DEFLATE_COMPRESS=y -# CONFIG_PSTORE_LZO_COMPRESS is not set -# CONFIG_PSTORE_LZ4_COMPRESS is not set -# CONFIG_PSTORE_LZ4HC_COMPRESS is not set -# CONFIG_PSTORE_842_COMPRESS is not set -# CONFIG_PSTORE_ZSTD_COMPRESS is not set -CONFIG_PSTORE_COMPRESS=y -CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y -CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" -# CONFIG_PSTORE_CONSOLE is not set -# CONFIG_PSTORE_PMSG is not set -# CONFIG_PSTORE_FTRACE is not set -# CONFIG_PSTORE_RAM is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -# CONFIG_NFS_V2 is not set -CONFIG_NFS_V3=m -# CONFIG_NFS_V3_ACL is not set -CONFIG_NFS_V4=m -# CONFIG_NFS_SWAP is not set -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_FLEXFILE_LAYOUT=m -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -# CONFIG_NFS_V4_1_MIGRATION is not set -CONFIG_NFS_V4_SECURITY_LABEL=y -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFSD=m -CONFIG_NFSD_V3=y -# CONFIG_NFSD_V3_ACL is not set -CONFIG_NFSD_V4=y -# CONFIG_NFSD_BLOCKLAYOUT is not set -# CONFIG_NFSD_SCSILAYOUT is not set -# CONFIG_NFSD_FLEXFILELAYOUT is not set -# CONFIG_NFSD_V4_SECURITY_LABEL is not set -# CONFIG_NFSD_FAULT_INJECTION is not set -CONFIG_GRACE_PERIOD=m -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=m -# CONFIG_SUNRPC_DEBUG is not set -CONFIG_CEPH_FS=m -CONFIG_CEPH_FSCACHE=y -CONFIG_CEPH_FS_POSIX_ACL=y -CONFIG_CIFS=y -# CONFIG_CIFS_STATS2 is not set -# CONFIG_CIFS_ALLOW_INSECURE_LEGACY is not set -# CONFIG_CIFS_UPCALL is not set -CONFIG_CIFS_XATTR=y -# CONFIG_CIFS_ACL is not set -CONFIG_CIFS_DEBUG=y -# 
CONFIG_CIFS_DEBUG2 is not set -# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set -CONFIG_CIFS_DFS_UPCALL=y -CONFIG_CIFS_FSCACHE=y -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_9P_FS=y -CONFIG_9P_FSCACHE=y -CONFIG_9P_FS_POSIX_ACL=y -CONFIG_9P_FS_SECURITY=y -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="iso8859-1" -CONFIG_NLS_CODEPAGE_437=y -# CONFIG_NLS_CODEPAGE_737 is not set -# CONFIG_NLS_CODEPAGE_775 is not set -# CONFIG_NLS_CODEPAGE_850 is not set -# CONFIG_NLS_CODEPAGE_852 is not set -# CONFIG_NLS_CODEPAGE_855 is not set -# CONFIG_NLS_CODEPAGE_857 is not set -# CONFIG_NLS_CODEPAGE_860 is not set -# CONFIG_NLS_CODEPAGE_861 is not set -# CONFIG_NLS_CODEPAGE_862 is not set -# CONFIG_NLS_CODEPAGE_863 is not set -# CONFIG_NLS_CODEPAGE_864 is not set -# CONFIG_NLS_CODEPAGE_865 is not set -# CONFIG_NLS_CODEPAGE_866 is not set -# CONFIG_NLS_CODEPAGE_869 is not set -# CONFIG_NLS_CODEPAGE_936 is not set -# CONFIG_NLS_CODEPAGE_950 is not set -# CONFIG_NLS_CODEPAGE_932 is not set -# CONFIG_NLS_CODEPAGE_949 is not set -# CONFIG_NLS_CODEPAGE_874 is not set -# CONFIG_NLS_ISO8859_8 is not set -# CONFIG_NLS_CODEPAGE_1250 is not set -# CONFIG_NLS_CODEPAGE_1251 is not set -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -# CONFIG_NLS_ISO8859_2 is not set -# CONFIG_NLS_ISO8859_3 is not set -# CONFIG_NLS_ISO8859_4 is not set -# CONFIG_NLS_ISO8859_5 is not set -# CONFIG_NLS_ISO8859_6 is not set -# CONFIG_NLS_ISO8859_7 is not set -# CONFIG_NLS_ISO8859_9 is not set -# CONFIG_NLS_ISO8859_13 is not set -# CONFIG_NLS_ISO8859_14 is not set -# CONFIG_NLS_ISO8859_15 is not set -# CONFIG_NLS_KOI8_R is not set -# CONFIG_NLS_KOI8_U is not set -# CONFIG_NLS_MAC_ROMAN is not set -# CONFIG_NLS_MAC_CELTIC is not set -# CONFIG_NLS_MAC_CENTEURO is not set -# CONFIG_NLS_MAC_CROATIAN is not set -# CONFIG_NLS_MAC_CYRILLIC is not set -# CONFIG_NLS_MAC_GAELIC is not set -# CONFIG_NLS_MAC_GREEK is not set -# CONFIG_NLS_MAC_ICELAND is not set -# CONFIG_NLS_MAC_INUIT is not set -# CONFIG_NLS_MAC_ROMANIAN is not set -# CONFIG_NLS_MAC_TURKISH is not set -CONFIG_NLS_UTF8=y -# CONFIG_DLM is not set - -# -# Security options -# -CONFIG_KEYS=y -CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_BIG_KEYS=y -CONFIG_TRUSTED_KEYS=y -CONFIG_ENCRYPTED_KEYS=y -CONFIG_KEY_DH_OPERATIONS=y -CONFIG_SECURITY_DMESG_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_NETWORK_XFRM=y -CONFIG_SECURITY_PATH=y -CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y -CONFIG_HARDENED_USERCOPY=y -CONFIG_HARDENED_USERCOPY_FALLBACK=y -# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set -CONFIG_FORTIFY_SOURCE=y -CONFIG_STATIC_USERMODEHELPER=y -CONFIG_STATIC_USERMODEHELPER_PATH="/sbin/usermode-helper" -# CONFIG_SECURITY_SELINUX is not set -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_SECURITY_LOADPIN is not set -CONFIG_SECURITY_YAMA=y -CONFIG_INTEGRITY=y -CONFIG_INTEGRITY_SIGNATURE=y -CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y -CONFIG_INTEGRITY_AUDIT=y -CONFIG_IMA=y -CONFIG_IMA_MEASURE_PCR_IDX=10 -# CONFIG_IMA_TEMPLATE is not set -CONFIG_IMA_NG_TEMPLATE=y -# CONFIG_IMA_SIG_TEMPLATE is not set -CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" -# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set -CONFIG_IMA_DEFAULT_HASH_SHA256=y -# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set -# CONFIG_IMA_DEFAULT_HASH_WP512 is not set -CONFIG_IMA_DEFAULT_HASH="sha256" -# CONFIG_IMA_WRITE_POLICY is not set -CONFIG_IMA_READ_POLICY=y -CONFIG_IMA_APPRAISE=y -# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set -CONFIG_IMA_APPRAISE_BOOTPARAM=y 
-CONFIG_EVM=y -CONFIG_EVM_ATTR_FSUUID=y -# CONFIG_EVM_ADD_XATTRS is not set -CONFIG_DEFAULT_SECURITY_DAC=y -CONFIG_DEFAULT_SECURITY="" -CONFIG_XOR_BLOCKS=m -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_KPP=y -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=y -# CONFIG_CRYPTO_ECDH is not set -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_USER=y -CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -CONFIG_CRYPTO_GF128MUL=y -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -# CONFIG_CRYPTO_PCRYPT is not set -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=y -# CONFIG_CRYPTO_MCRYPTD is not set -CONFIG_CRYPTO_AUTHENC=y -# CONFIG_CRYPTO_TEST is not set -CONFIG_CRYPTO_SIMD=y -CONFIG_CRYPTO_ENGINE=m - -# -# Authenticated Encryption with Associated Data -# -CONFIG_CRYPTO_CCM=y -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_CHACHA20POLY1305=y -# CONFIG_CRYPTO_AEGIS128 is not set -# CONFIG_CRYPTO_AEGIS128L is not set -# CONFIG_CRYPTO_AEGIS256 is not set -# CONFIG_CRYPTO_MORUS640 is not set -# CONFIG_CRYPTO_MORUS1280 is not set -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_ECHAINIV=y - -# -# Block modes -# -CONFIG_CRYPTO_CBC=y -# CONFIG_CRYPTO_CFB is not set -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_LRW=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_XTS=y -CONFIG_CRYPTO_KEYWRAP=y - -# -# Hash modes -# -CONFIG_CRYPTO_CMAC=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_VMAC=y - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32=y -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_POLY1305=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=y -CONFIG_CRYPTO_RMD128=y -CONFIG_CRYPTO_RMD160=y -CONFIG_CRYPTO_RMD256=y -CONFIG_CRYPTO_RMD320=y -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -# CONFIG_CRYPTO_SHA3 is not set -# CONFIG_CRYPTO_SM3 is not set -CONFIG_CRYPTO_TGR192=y -CONFIG_CRYPTO_WP512=y - -# -# Ciphers -# -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_TI is not set -CONFIG_CRYPTO_ANUBIS=y -CONFIG_CRYPTO_ARC4=y -CONFIG_CRYPTO_BLOWFISH=y -CONFIG_CRYPTO_BLOWFISH_COMMON=y -CONFIG_CRYPTO_CAMELLIA=y -CONFIG_CRYPTO_CAST_COMMON=y -CONFIG_CRYPTO_CAST5=y -CONFIG_CRYPTO_CAST6=y -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_FCRYPT=y -CONFIG_CRYPTO_KHAZAD=y -CONFIG_CRYPTO_SALSA20=y -CONFIG_CRYPTO_CHACHA20=y -CONFIG_CRYPTO_SEED=y -CONFIG_CRYPTO_SERPENT=y -# CONFIG_CRYPTO_SM4 is not set -CONFIG_CRYPTO_TEA=y -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_TWOFISH_COMMON=y - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_LZO=y -CONFIG_CRYPTO_842=y -CONFIG_CRYPTO_LZ4=y -CONFIG_CRYPTO_LZ4HC=y -# CONFIG_CRYPTO_ZSTD is not set - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y -# CONFIG_CRYPTO_DRBG_HASH is not set -# CONFIG_CRYPTO_DRBG_CTR is not set -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_USER_API_RNG=y -CONFIG_CRYPTO_USER_API_AEAD=y -CONFIG_CRYPTO_HASH_INFO=y -CONFIG_CRYPTO_HW=y -# CONFIG_CRYPTO_DEV_FSL_CAAM is not set -# CONFIG_CRYPTO_DEV_EXYNOS_RNG is not set -# CONFIG_CRYPTO_DEV_S5P is 
not set -# CONFIG_CRYPTO_DEV_CCP is not set -# CONFIG_CAVIUM_CPT is not set -# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set -CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m -CONFIG_CRYPTO_DEV_QCE=m -CONFIG_CRYPTO_DEV_QCOM_RNG=m -CONFIG_CRYPTO_DEV_VIRTIO=m -# CONFIG_CRYPTO_DEV_CCREE is not set -CONFIG_CRYPTO_DEV_HISI_SEC=m -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -CONFIG_PKCS7_MESSAGE_PARSER=y - -# -# Certificates for signature checking -# -# CONFIG_SYSTEM_TRUSTED_KEYRING is not set -# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_BITREVERSE=y -CONFIG_HAVE_ARCH_BITREVERSE=y -CONFIG_RATIONAL=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_HAS_FAST_MULTIPLIER=y -# CONFIG_INDIRECT_PIO is not set -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC_ITU_T=y -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -# CONFIG_CRC64 is not set -# CONFIG_CRC4 is not set -# CONFIG_CRC7 is not set -CONFIG_LIBCRC32C=y -# CONFIG_CRC8 is not set -CONFIG_XXHASH=m -CONFIG_AUDIT_GENERIC=y -CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_842_COMPRESS=y -CONFIG_842_DECOMPRESS=y -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=y -CONFIG_LZ4HC_COMPRESS=y -CONFIG_LZ4_DECOMPRESS=y -CONFIG_ZSTD_COMPRESS=m -CONFIG_ZSTD_DECOMPRESS=m -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=y -CONFIG_TEXTSEARCH_BM=y -CONFIG_TEXTSEARCH_FSM=y -CONFIG_RADIX_TREE_MULTIORDER=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_DMA_DIRECT_OPS=y -CONFIG_SWIOTLB=y -CONFIG_SGL_ALLOC=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_LRU_CACHE=m -CONFIG_CLZ_TAB=y -# CONFIG_CORDIC is not set -# CONFIG_DDR is not set -# CONFIG_IRQ_POLL is not set -CONFIG_MPILIB=y -CONFIG_SIGNATURE=y -CONFIG_LIBFDT=y -CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_SG_SPLIT=y -CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_SG_CHAIN=y -CONFIG_SBITMAP=y -# CONFIG_STRING_SELFTEST is not set - -# -# Kernel hacking -# - -# -# printk and dmesg options -# -CONFIG_PRINTK_TIME=y -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 -CONFIG_CONSOLE_LOGLEVEL_QUIET=4 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -# CONFIG_BOOT_PRINTK_DELAY is not set -# CONFIG_DYNAMIC_DEBUG is not set - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -CONFIG_DEBUG_INFO_SPLIT=y -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_GDB_SCRIPTS is not set -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=1024 -# CONFIG_STRIP_ASM_SYMS is not set -# CONFIG_READABLE_ASM is not set -# CONFIG_UNUSED_SYMBOLS 
is not set -# CONFIG_PAGE_OWNER is not set -CONFIG_DEBUG_FS=y -# CONFIG_HEADERS_CHECK is not set -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_ARCH_WANT_FRAME_POINTERS=y -CONFIG_FRAME_POINTER=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_DEBUG_KERNEL=y - -# -# Memory Debugging -# -CONFIG_PAGE_EXTENSION=y -# CONFIG_DEBUG_PAGEALLOC is not set -CONFIG_PAGE_POISONING=y -CONFIG_PAGE_POISONING_NO_SANITY=y -CONFIG_PAGE_POISONING_ZERO=y -# CONFIG_DEBUG_PAGE_REF is not set -# CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_DEBUG_SLAB is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_VM is not set -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_ARCH_KASAN=y -# CONFIG_KASAN is not set -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set -# CONFIG_DEBUG_SHIRQ is not set - -# -# Debug Lockups and Hangs -# -# CONFIG_SOFTLOCKUP_DETECTOR is not set -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 -CONFIG_WQ_WATCHDOG=y -CONFIG_PANIC_ON_OOPS=y -CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=0 -CONFIG_SCHED_DEBUG=y -CONFIG_SCHED_INFO=y -# CONFIG_SCHEDSTATS is not set -# CONFIG_SCHED_STACK_END_CHECK is not set -# CONFIG_DEBUG_TIMEKEEPING is not set -CONFIG_DEBUG_PREEMPT=y - -# -# Lock Debugging (spinlocks, mutexes, etc...) -# -CONFIG_LOCK_DEBUGGING_SUPPORT=y -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_RWSEMS is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -# CONFIG_WW_MUTEX_SELFTEST is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_HAVE_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_LIST=y -# CONFIG_DEBUG_PI_LIST is not set -# CONFIG_DEBUG_SG is not set -CONFIG_DEBUG_NOTIFIERS=y -CONFIG_DEBUG_CREDENTIALS=y - -# -# RCU Debugging -# -# CONFIG_RCU_PERF_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_LATENCYTOP is not set -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y -# CONFIG_PREEMPTIRQ_EVENTS is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_PREEMPT_TRACER is not set -# 
CONFIG_SCHED_TRACER is not set -# CONFIG_HWLAT_TRACER is not set -CONFIG_FTRACE_SYSCALLS=y -# CONFIG_TRACER_SNAPSHOT is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -CONFIG_STACK_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_KPROBE_EVENTS=y -CONFIG_UPROBE_EVENTS=y -CONFIG_BPF_EVENTS=y -CONFIG_PROBE_EVENTS=y -CONFIG_DYNAMIC_FTRACE=y -CONFIG_FUNCTION_PROFILER=y -CONFIG_FTRACE_MCOUNT_RECORD=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_HIST_TRIGGERS is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_PREEMPTIRQ_DELAY_TEST is not set -# CONFIG_TRACE_EVAL_MAP_FILE is not set -CONFIG_TRACING_EVENTS_GPIO=y -# CONFIG_DMA_API_DEBUG is not set -CONFIG_RUNTIME_TESTING_MENU=y -# CONFIG_LKDTM is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_TEST_SORT is not set -# CONFIG_KPROBES_SANITY_TEST is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST is not set -# CONFIG_ATOMIC64_SELFTEST is not set -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_TEST_STRING_HELPERS is not set -# CONFIG_TEST_KSTRTOX is not set -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_BITMAP is not set -# CONFIG_TEST_BITFIELD is not set -# CONFIG_TEST_UUID is not set -# CONFIG_TEST_OVERFLOW is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_HASH is not set -# CONFIG_TEST_IDA is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_USER_COPY is not set -# CONFIG_TEST_BPF is not set -# CONFIG_FIND_BIT_BENCHMARK is not set -# CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_SYSCTL is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_TEST_KMOD is not set -# CONFIG_MEMTEST is not set -CONFIG_BUG_ON_DATA_CORRUPTION=y -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -CONFIG_UBSAN=y -# CONFIG_UBSAN_SANITIZE_ALL is not set -# CONFIG_UBSAN_ALIGNMENT is not set -# CONFIG_TEST_UBSAN is not set -CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y -CONFIG_STRICT_DEVMEM=y -# CONFIG_IO_STRICT_DEVMEM is not set -# CONFIG_ARM64_PTDUMP_DEBUGFS is not set -# CONFIG_PID_IN_CONTEXTIDR is not set -# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set -# CONFIG_DEBUG_WX is not set -# CONFIG_DEBUG_ALIGN_RODATA is not set -# CONFIG_DEBUG_EFI is not set -# CONFIG_ARM64_RELOC_TEST is not set -# CONFIG_CORESIGHT is not set diff --git a/kernel/config-4.19.x-aarch64-rt b/kernel/config-4.19.x-aarch64-rt deleted file mode 100644 index 59ef068ac..000000000 --- a/kernel/config-4.19.x-aarch64-rt +++ /dev/null @@ -1,20 +0,0 @@ -CONFIG_SLUB_DEBUG=y -# CONFIG_SLUB_MEMCG_SYSFS_ON is not set -CONFIG_SLUB=y -# CONFIG_SLAB_FREELIST_HARDENED is not set -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_PREEMPT=y -CONFIG_PREEMPT_RT_BASE=y -CONFIG_HAVE_PREEMPT_LAZY=y -CONFIG_PREEMPT_LAZY=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT__LL is not set -# CONFIG_PREEMPT_RTB is not set -CONFIG_PREEMPT_RT_FULL=y -CONFIG_PREEMPT_COUNT=y -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_SLUB_STATS is not set -CONFIG_DEBUG_PREEMPT=y -# CONFIG_PREEMPT_TRACER is not set -CONFIG_HZ_1000=y -CONFIG_HZ=1000 diff --git a/kernel/config-4.19.x-x86_64-rt b/kernel/config-4.19.x-x86_64-rt deleted file mode 100644 index 3c833eb4b..000000000 --- a/kernel/config-4.19.x-x86_64-rt +++ /dev/null @@ -1,22 +0,0 @@ -CONFIG_RWSEM_GENERIC_SPINLOCK=y -# CONFIG_RWSEM_XCHGADD_ALGORITHM 
is not set -CONFIG_PREEMPT_RCU=y -CONFIG_TASKS_RCU=y -CONFIG_SLUB_DEBUG=y -# CONFIG_SLUB_MEMCG_SYSFS_ON is not set -CONFIG_SLUB=y -# CONFIG_SLAB_FREELIST_HARDENED is not set -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_PREEMPT=y -CONFIG_PREEMPT_RT_BASE=y -CONFIG_HAVE_PREEMPT_LAZY=y -CONFIG_PREEMPT_LAZY=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT__LL is not set -# CONFIG_PREEMPT_RTB is not set -CONFIG_PREEMPT_RT_FULL=y -CONFIG_PREEMPT_COUNT=y -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_SLUB_STATS is not set -CONFIG_DEBUG_PREEMPT=y -# CONFIG_PREEMPT_TRACER is not set diff --git a/kernel/patches-4.19.x-rt/0001-ARM-at91-add-TCB-registers-definitions.patch b/kernel/patches-4.19.x-rt/0001-ARM-at91-add-TCB-registers-definitions.patch deleted file mode 100644 index 8d219af90..000000000 --- a/kernel/patches-4.19.x-rt/0001-ARM-at91-add-TCB-registers-definitions.patch +++ /dev/null @@ -1,209 +0,0 @@ -From d831f2ac120e802a4ff642f48f6b88e543665514 Mon Sep 17 00:00:00 2001 -From: Alexandre Belloni -Date: Thu, 13 Sep 2018 13:30:18 +0200 -Subject: [PATCH 001/328] ARM: at91: add TCB registers definitions - -Add registers and bits definitions for the timer counter blocks found on -Atmel ARM SoCs. - -Tested-by: Alexander Dahl -Tested-by: Andras Szemzo -Signed-off-by: Alexandre Belloni -Signed-off-by: Sebastian Andrzej Siewior ---- - include/soc/at91/atmel_tcb.h | 183 +++++++++++++++++++++++++++++++++++ - 1 file changed, 183 insertions(+) - create mode 100644 include/soc/at91/atmel_tcb.h - -diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h -new file mode 100644 -index 000000000000..657e234b1483 ---- /dev/null -+++ b/include/soc/at91/atmel_tcb.h -@@ -0,0 +1,183 @@ -+//SPDX-License-Identifier: GPL-2.0 -+/* Copyright (C) 2018 Microchip */ -+ -+#ifndef __SOC_ATMEL_TCB_H -+#define __SOC_ATMEL_TCB_H -+ -+/* Channel registers */ -+#define ATMEL_TC_COFFS(c) ((c) * 0x40) -+#define ATMEL_TC_CCR(c) ATMEL_TC_COFFS(c) -+#define ATMEL_TC_CMR(c) (ATMEL_TC_COFFS(c) + 0x4) -+#define ATMEL_TC_SMMR(c) (ATMEL_TC_COFFS(c) + 0x8) -+#define ATMEL_TC_RAB(c) (ATMEL_TC_COFFS(c) + 0xc) -+#define ATMEL_TC_CV(c) (ATMEL_TC_COFFS(c) + 0x10) -+#define ATMEL_TC_RA(c) (ATMEL_TC_COFFS(c) + 0x14) -+#define ATMEL_TC_RB(c) (ATMEL_TC_COFFS(c) + 0x18) -+#define ATMEL_TC_RC(c) (ATMEL_TC_COFFS(c) + 0x1c) -+#define ATMEL_TC_SR(c) (ATMEL_TC_COFFS(c) + 0x20) -+#define ATMEL_TC_IER(c) (ATMEL_TC_COFFS(c) + 0x24) -+#define ATMEL_TC_IDR(c) (ATMEL_TC_COFFS(c) + 0x28) -+#define ATMEL_TC_IMR(c) (ATMEL_TC_COFFS(c) + 0x2c) -+#define ATMEL_TC_EMR(c) (ATMEL_TC_COFFS(c) + 0x30) -+ -+/* Block registers */ -+#define ATMEL_TC_BCR 0xc0 -+#define ATMEL_TC_BMR 0xc4 -+#define ATMEL_TC_QIER 0xc8 -+#define ATMEL_TC_QIDR 0xcc -+#define ATMEL_TC_QIMR 0xd0 -+#define ATMEL_TC_QISR 0xd4 -+#define ATMEL_TC_FMR 0xd8 -+#define ATMEL_TC_WPMR 0xe4 -+ -+/* CCR fields */ -+#define ATMEL_TC_CCR_CLKEN BIT(0) -+#define ATMEL_TC_CCR_CLKDIS BIT(1) -+#define ATMEL_TC_CCR_SWTRG BIT(2) -+ -+/* Common CMR fields */ -+#define ATMEL_TC_CMR_TCLKS_MSK GENMASK(2, 0) -+#define ATMEL_TC_CMR_TCLK(x) (x) -+#define ATMEL_TC_CMR_XC(x) ((x) + 5) -+#define ATMEL_TC_CMR_CLKI BIT(3) -+#define ATMEL_TC_CMR_BURST_MSK GENMASK(5, 4) -+#define ATMEL_TC_CMR_BURST_XC(x) (((x) + 1) << 4) -+#define ATMEL_TC_CMR_WAVE BIT(15) -+ -+/* Capture mode CMR fields */ -+#define ATMEL_TC_CMR_LDBSTOP BIT(6) -+#define ATMEL_TC_CMR_LDBDIS BIT(7) -+#define ATMEL_TC_CMR_ETRGEDG_MSK GENMASK(9, 8) -+#define ATMEL_TC_CMR_ETRGEDG_NONE (0 << 8) -+#define ATMEL_TC_CMR_ETRGEDG_RISING (1 << 8) -+#define 
ATMEL_TC_CMR_ETRGEDG_FALLING (2 << 8) -+#define ATMEL_TC_CMR_ETRGEDG_BOTH (3 << 8) -+#define ATMEL_TC_CMR_ABETRG BIT(10) -+#define ATMEL_TC_CMR_CPCTRG BIT(14) -+#define ATMEL_TC_CMR_LDRA_MSK GENMASK(17, 16) -+#define ATMEL_TC_CMR_LDRA_NONE (0 << 16) -+#define ATMEL_TC_CMR_LDRA_RISING (1 << 16) -+#define ATMEL_TC_CMR_LDRA_FALLING (2 << 16) -+#define ATMEL_TC_CMR_LDRA_BOTH (3 << 16) -+#define ATMEL_TC_CMR_LDRB_MSK GENMASK(19, 18) -+#define ATMEL_TC_CMR_LDRB_NONE (0 << 18) -+#define ATMEL_TC_CMR_LDRB_RISING (1 << 18) -+#define ATMEL_TC_CMR_LDRB_FALLING (2 << 18) -+#define ATMEL_TC_CMR_LDRB_BOTH (3 << 18) -+#define ATMEL_TC_CMR_SBSMPLR_MSK GENMASK(22, 20) -+#define ATMEL_TC_CMR_SBSMPLR(x) ((x) << 20) -+ -+/* Waveform mode CMR fields */ -+#define ATMEL_TC_CMR_CPCSTOP BIT(6) -+#define ATMEL_TC_CMR_CPCDIS BIT(7) -+#define ATMEL_TC_CMR_EEVTEDG_MSK GENMASK(9, 8) -+#define ATMEL_TC_CMR_EEVTEDG_NONE (0 << 8) -+#define ATMEL_TC_CMR_EEVTEDG_RISING (1 << 8) -+#define ATMEL_TC_CMR_EEVTEDG_FALLING (2 << 8) -+#define ATMEL_TC_CMR_EEVTEDG_BOTH (3 << 8) -+#define ATMEL_TC_CMR_EEVT_MSK GENMASK(11, 10) -+#define ATMEL_TC_CMR_EEVT_XC(x) (((x) + 1) << 10) -+#define ATMEL_TC_CMR_ENETRG BIT(12) -+#define ATMEL_TC_CMR_WAVESEL_MSK GENMASK(14, 13) -+#define ATMEL_TC_CMR_WAVESEL_UP (0 << 13) -+#define ATMEL_TC_CMR_WAVESEL_UPDOWN (1 << 13) -+#define ATMEL_TC_CMR_WAVESEL_UPRC (2 << 13) -+#define ATMEL_TC_CMR_WAVESEL_UPDOWNRC (3 << 13) -+#define ATMEL_TC_CMR_ACPA_MSK GENMASK(17, 16) -+#define ATMEL_TC_CMR_ACPA(a) (ATMEL_TC_CMR_ACTION_##a << 16) -+#define ATMEL_TC_CMR_ACPC_MSK GENMASK(19, 18) -+#define ATMEL_TC_CMR_ACPC(a) (ATMEL_TC_CMR_ACTION_##a << 18) -+#define ATMEL_TC_CMR_AEEVT_MSK GENMASK(21, 20) -+#define ATMEL_TC_CMR_AEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 20) -+#define ATMEL_TC_CMR_ASWTRG_MSK GENMASK(23, 22) -+#define ATMEL_TC_CMR_ASWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 22) -+#define ATMEL_TC_CMR_BCPB_MSK GENMASK(25, 24) -+#define ATMEL_TC_CMR_BCPB(a) (ATMEL_TC_CMR_ACTION_##a << 24) -+#define ATMEL_TC_CMR_BCPC_MSK GENMASK(27, 26) -+#define ATMEL_TC_CMR_BCPC(a) (ATMEL_TC_CMR_ACTION_##a << 26) -+#define ATMEL_TC_CMR_BEEVT_MSK GENMASK(29, 28) -+#define ATMEL_TC_CMR_BEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 28) -+#define ATMEL_TC_CMR_BSWTRG_MSK GENMASK(31, 30) -+#define ATMEL_TC_CMR_BSWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 30) -+#define ATMEL_TC_CMR_ACTION_NONE 0 -+#define ATMEL_TC_CMR_ACTION_SET 1 -+#define ATMEL_TC_CMR_ACTION_CLEAR 2 -+#define ATMEL_TC_CMR_ACTION_TOGGLE 3 -+ -+/* SMMR fields */ -+#define ATMEL_TC_SMMR_GCEN BIT(0) -+#define ATMEL_TC_SMMR_DOWN BIT(1) -+ -+/* SR/IER/IDR/IMR fields */ -+#define ATMEL_TC_COVFS BIT(0) -+#define ATMEL_TC_LOVRS BIT(1) -+#define ATMEL_TC_CPAS BIT(2) -+#define ATMEL_TC_CPBS BIT(3) -+#define ATMEL_TC_CPCS BIT(4) -+#define ATMEL_TC_LDRAS BIT(5) -+#define ATMEL_TC_LDRBS BIT(6) -+#define ATMEL_TC_ETRGS BIT(7) -+#define ATMEL_TC_CLKSTA BIT(16) -+#define ATMEL_TC_MTIOA BIT(17) -+#define ATMEL_TC_MTIOB BIT(18) -+ -+/* EMR fields */ -+#define ATMEL_TC_EMR_TRIGSRCA_MSK GENMASK(1, 0) -+#define ATMEL_TC_EMR_TRIGSRCA_TIOA 0 -+#define ATMEL_TC_EMR_TRIGSRCA_PWMX 1 -+#define ATMEL_TC_EMR_TRIGSRCB_MSK GENMASK(5, 4) -+#define ATMEL_TC_EMR_TRIGSRCB_TIOB (0 << 4) -+#define ATMEL_TC_EMR_TRIGSRCB_PWM (1 << 4) -+#define ATMEL_TC_EMR_NOCLKDIV BIT(8) -+ -+/* BCR fields */ -+#define ATMEL_TC_BCR_SYNC BIT(0) -+ -+/* BMR fields */ -+#define ATMEL_TC_BMR_TCXC_MSK(c) GENMASK(((c) * 2) + 1, (c) * 2) -+#define ATMEL_TC_BMR_TCXC(x, c) ((x) << (2 * (c))) -+#define ATMEL_TC_BMR_QDEN BIT(8) -+#define ATMEL_TC_BMR_POSEN 
BIT(9) -+#define ATMEL_TC_BMR_SPEEDEN BIT(10) -+#define ATMEL_TC_BMR_QDTRANS BIT(11) -+#define ATMEL_TC_BMR_EDGPHA BIT(12) -+#define ATMEL_TC_BMR_INVA BIT(13) -+#define ATMEL_TC_BMR_INVB BIT(14) -+#define ATMEL_TC_BMR_INVIDX BIT(15) -+#define ATMEL_TC_BMR_SWAP BIT(16) -+#define ATMEL_TC_BMR_IDXPHB BIT(17) -+#define ATMEL_TC_BMR_AUTOC BIT(18) -+#define ATMEL_TC_MAXFILT_MSK GENMASK(25, 20) -+#define ATMEL_TC_MAXFILT(x) (((x) - 1) << 20) -+#define ATMEL_TC_MAXCMP_MSK GENMASK(29, 26) -+#define ATMEL_TC_MAXCMP(x) ((x) << 26) -+ -+/* QEDC fields */ -+#define ATMEL_TC_QEDC_IDX BIT(0) -+#define ATMEL_TC_QEDC_DIRCHG BIT(1) -+#define ATMEL_TC_QEDC_QERR BIT(2) -+#define ATMEL_TC_QEDC_MPE BIT(3) -+#define ATMEL_TC_QEDC_DIR BIT(8) -+ -+/* FMR fields */ -+#define ATMEL_TC_FMR_ENCF(x) BIT(x) -+ -+/* WPMR fields */ -+#define ATMEL_TC_WPMR_WPKEY (0x54494d << 8) -+#define ATMEL_TC_WPMR_WPEN BIT(0) -+ -+static const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, }; -+ -+static const struct of_device_id atmel_tcb_dt_ids[] = { -+ { -+ .compatible = "atmel,at91rm9200-tcb", -+ .data = (void *)16, -+ }, { -+ .compatible = "atmel,at91sam9x5-tcb", -+ .data = (void *)32, -+ }, { -+ /* sentinel */ -+ } -+}; -+ -+#endif /* __SOC_ATMEL_TCB_H */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch b/kernel/patches-4.19.x-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch deleted file mode 100644 index 868fc0ef3..000000000 --- a/kernel/patches-4.19.x-rt/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch +++ /dev/null @@ -1,484 +0,0 @@ -From a8f6e3cf352d669d8b870469ab3bff8fc64c3367 Mon Sep 17 00:00:00 2001 -From: Alexandre Belloni -Date: Thu, 13 Sep 2018 13:30:19 +0200 -Subject: [PATCH 002/328] clocksource/drivers: Add a new driver for the Atmel - ARM TC blocks - -Add a driver for the Atmel Timer Counter Blocks. This driver provides a -clocksource and two clockevent devices. - -One of the clockevent device is linked to the clocksource counter and so it -will run at the same frequency. This will be used when there is only on TCB -channel available for timers. - -The other clockevent device runs on a separate TCB channel when available. - -This driver uses regmap and syscon to be able to probe early in the boot -and avoid having to switch on the TCB clocksource later. Using regmap also -means that unused TCB channels may be used by other drivers (PWM for -example). read/writel are still used to access channel specific registers -to avoid the performance impact of regmap (mainly locking). - -Tested-by: Alexander Dahl -Tested-by: Andras Szemzo -Signed-off-by: Alexandre Belloni -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/clocksource/Kconfig | 8 + - drivers/clocksource/Makefile | 3 +- - drivers/clocksource/timer-atmel-tcb.c | 410 ++++++++++++++++++++++++++ - 3 files changed, 420 insertions(+), 1 deletion(-) - create mode 100644 drivers/clocksource/timer-atmel-tcb.c - -diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig -index 4d37f018d846..0ab22e7037f4 100644 ---- a/drivers/clocksource/Kconfig -+++ b/drivers/clocksource/Kconfig -@@ -415,6 +415,14 @@ config ATMEL_ST - help - Support for the Atmel ST timer. - -+config ATMEL_ARM_TCB_CLKSRC -+ bool "Microchip ARM TC Block" if COMPILE_TEST -+ select REGMAP_MMIO -+ depends on GENERIC_CLOCKEVENTS -+ help -+ This enables build of clocksource and clockevent driver for -+ the integrated Timer Counter Blocks in Microchip ARM SoCs. 
-+ - config CLKSRC_EXYNOS_MCT - bool "Exynos multi core timer driver" if COMPILE_TEST - depends on ARM || ARM64 -diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile -index db51b2427e8a..0df9384a1230 100644 ---- a/drivers/clocksource/Makefile -+++ b/drivers/clocksource/Makefile -@@ -3,7 +3,8 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o - obj-$(CONFIG_TIMER_PROBE) += timer-probe.o - obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o - obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o --obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o -+obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o -+obj-$(CONFIG_ATMEL_ARM_TCB_CLKSRC) += timer-atmel-tcb.o - obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o - obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o - obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o -diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c -new file mode 100644 -index 000000000000..21fbe430f91b ---- /dev/null -+++ b/drivers/clocksource/timer-atmel-tcb.c -@@ -0,0 +1,410 @@ -+// SPDX-License-Identifier: GPL-2.0 -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct atmel_tcb_clksrc { -+ struct clocksource clksrc; -+ struct clock_event_device clkevt; -+ struct regmap *regmap; -+ void __iomem *base; -+ struct clk *clk[2]; -+ char name[20]; -+ int channels[2]; -+ int bits; -+ int irq; -+ struct { -+ u32 cmr; -+ u32 imr; -+ u32 rc; -+ bool clken; -+ } cache[2]; -+ u32 bmr_cache; -+ bool registered; -+ bool clk_enabled; -+}; -+ -+static struct atmel_tcb_clksrc tc; -+ -+static struct clk *tcb_clk_get(struct device_node *node, int channel) -+{ -+ struct clk *clk; -+ char clk_name[] = "t0_clk"; -+ -+ clk_name[1] += channel; -+ clk = of_clk_get_by_name(node->parent, clk_name); -+ if (!IS_ERR(clk)) -+ return clk; -+ -+ return of_clk_get_by_name(node->parent, "t0_clk"); -+} -+ -+/* -+ * Clocksource and clockevent using the same channel(s) -+ */ -+static u64 tc_get_cycles(struct clocksource *cs) -+{ -+ u32 lower, upper; -+ -+ do { -+ upper = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1])); -+ lower = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0])); -+ } while (upper != readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1]))); -+ -+ return (upper << 16) | lower; -+} -+ -+static u64 tc_get_cycles32(struct clocksource *cs) -+{ -+ return readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0])); -+} -+ -+static u64 notrace tc_sched_clock_read(void) -+{ -+ return tc_get_cycles(&tc.clksrc); -+} -+ -+static u64 notrace tc_sched_clock_read32(void) -+{ -+ return tc_get_cycles32(&tc.clksrc); -+} -+ -+static int tcb_clkevt_next_event(unsigned long delta, -+ struct clock_event_device *d) -+{ -+ u32 old, next, cur; -+ -+ old = readl(tc.base + ATMEL_TC_CV(tc.channels[0])); -+ next = old + delta; -+ writel(next, tc.base + ATMEL_TC_RC(tc.channels[0])); -+ cur = readl(tc.base + ATMEL_TC_CV(tc.channels[0])); -+ -+ /* check whether the delta elapsed while setting the register */ -+ if ((next < old && cur < old && cur > next) || -+ (next > old && (cur < old || cur > next))) { -+ /* -+ * Clear the CPCS bit in the status register to avoid -+ * generating a spurious interrupt next time a valid -+ * timer event is configured. 
-+ */ -+ old = readl(tc.base + ATMEL_TC_SR(tc.channels[0])); -+ return -ETIME; -+ } -+ -+ writel(ATMEL_TC_CPCS, tc.base + ATMEL_TC_IER(tc.channels[0])); -+ -+ return 0; -+} -+ -+static irqreturn_t tc_clkevt_irq(int irq, void *handle) -+{ -+ unsigned int sr; -+ -+ sr = readl(tc.base + ATMEL_TC_SR(tc.channels[0])); -+ if (sr & ATMEL_TC_CPCS) { -+ tc.clkevt.event_handler(&tc.clkevt); -+ return IRQ_HANDLED; -+ } -+ -+ return IRQ_NONE; -+} -+ -+static int tcb_clkevt_oneshot(struct clock_event_device *dev) -+{ -+ if (clockevent_state_oneshot(dev)) -+ return 0; -+ -+ /* -+ * Because both clockevent devices may share the same IRQ, we don't want -+ * the less likely one to stay requested -+ */ -+ return request_irq(tc.irq, tc_clkevt_irq, IRQF_TIMER | IRQF_SHARED, -+ tc.name, &tc); -+} -+ -+static int tcb_clkevt_shutdown(struct clock_event_device *dev) -+{ -+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[0])); -+ if (tc.bits == 16) -+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[1])); -+ -+ if (!clockevent_state_detached(dev)) -+ free_irq(tc.irq, &tc); -+ -+ return 0; -+} -+ -+static void __init tcb_setup_dual_chan(struct atmel_tcb_clksrc *tc, -+ int mck_divisor_idx) -+{ -+ /* first channel: waveform mode, input mclk/8, clock TIOA on overflow */ -+ writel(mck_divisor_idx /* likely divide-by-8 */ -+ | ATMEL_TC_CMR_WAVE -+ | ATMEL_TC_CMR_WAVESEL_UP /* free-run */ -+ | ATMEL_TC_CMR_ACPA(SET) /* TIOA rises at 0 */ -+ | ATMEL_TC_CMR_ACPC(CLEAR), /* (duty cycle 50%) */ -+ tc->base + ATMEL_TC_CMR(tc->channels[0])); -+ writel(0x0000, tc->base + ATMEL_TC_RA(tc->channels[0])); -+ writel(0x8000, tc->base + ATMEL_TC_RC(tc->channels[0])); -+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */ -+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0])); -+ -+ /* second channel: waveform mode, input TIOA */ -+ writel(ATMEL_TC_CMR_XC(tc->channels[1]) /* input: TIOA */ -+ | ATMEL_TC_CMR_WAVE -+ | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */ -+ tc->base + ATMEL_TC_CMR(tc->channels[1])); -+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[1])); /* no irqs */ -+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[1])); -+ -+ /* chain both channel, we assume the previous channel */ -+ regmap_write(tc->regmap, ATMEL_TC_BMR, -+ ATMEL_TC_BMR_TCXC(1 + tc->channels[1], tc->channels[1])); -+ /* then reset all the timers */ -+ regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC); -+} -+ -+static void __init tcb_setup_single_chan(struct atmel_tcb_clksrc *tc, -+ int mck_divisor_idx) -+{ -+ /* channel 0: waveform mode, input mclk/8 */ -+ writel(mck_divisor_idx /* likely divide-by-8 */ -+ | ATMEL_TC_CMR_WAVE -+ | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */ -+ tc->base + ATMEL_TC_CMR(tc->channels[0])); -+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */ -+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0])); -+ -+ /* then reset all the timers */ -+ regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC); -+} -+ -+static void tc_clksrc_suspend(struct clocksource *cs) -+{ -+ int i; -+ -+ for (i = 0; i < 1 + (tc.bits == 16); i++) { -+ tc.cache[i].cmr = readl(tc.base + ATMEL_TC_CMR(tc.channels[i])); -+ tc.cache[i].imr = readl(tc.base + ATMEL_TC_IMR(tc.channels[i])); -+ tc.cache[i].rc = readl(tc.base + ATMEL_TC_RC(tc.channels[i])); -+ tc.cache[i].clken = !!(readl(tc.base + -+ ATMEL_TC_SR(tc.channels[i])) & -+ ATMEL_TC_CLKSTA); -+ } -+ -+ if (tc.bits == 16) -+ regmap_read(tc.regmap, ATMEL_TC_BMR, &tc.bmr_cache); -+} -+ -+static void 
tc_clksrc_resume(struct clocksource *cs) -+{ -+ int i; -+ -+ for (i = 0; i < 1 + (tc.bits == 16); i++) { -+ /* Restore registers for the channel, RA and RB are not used */ -+ writel(tc.cache[i].cmr, tc.base + ATMEL_TC_CMR(tc.channels[i])); -+ writel(tc.cache[i].rc, tc.base + ATMEL_TC_RC(tc.channels[i])); -+ writel(0, tc.base + ATMEL_TC_RA(tc.channels[i])); -+ writel(0, tc.base + ATMEL_TC_RB(tc.channels[i])); -+ /* Disable all the interrupts */ -+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[i])); -+ /* Reenable interrupts that were enabled before suspending */ -+ writel(tc.cache[i].imr, tc.base + ATMEL_TC_IER(tc.channels[i])); -+ -+ /* Start the clock if it was used */ -+ if (tc.cache[i].clken) -+ writel(ATMEL_TC_CCR_CLKEN, tc.base + -+ ATMEL_TC_CCR(tc.channels[i])); -+ } -+ -+ /* in case of dual channel, chain channels */ -+ if (tc.bits == 16) -+ regmap_write(tc.regmap, ATMEL_TC_BMR, tc.bmr_cache); -+ /* Finally, trigger all the channels*/ -+ regmap_write(tc.regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC); -+} -+ -+static int __init tcb_clksrc_register(struct device_node *node, -+ struct regmap *regmap, void __iomem *base, -+ int channel, int channel1, int irq, -+ int bits) -+{ -+ u32 rate, divided_rate = 0; -+ int best_divisor_idx = -1; -+ int i, err = -1; -+ u64 (*tc_sched_clock)(void); -+ -+ tc.regmap = regmap; -+ tc.base = base; -+ tc.channels[0] = channel; -+ tc.channels[1] = channel1; -+ tc.irq = irq; -+ tc.bits = bits; -+ -+ tc.clk[0] = tcb_clk_get(node, tc.channels[0]); -+ if (IS_ERR(tc.clk[0])) -+ return PTR_ERR(tc.clk[0]); -+ err = clk_prepare_enable(tc.clk[0]); -+ if (err) { -+ pr_debug("can't enable T0 clk\n"); -+ goto err_clk; -+ } -+ -+ /* How fast will we be counting? Pick something over 5 MHz. */ -+ rate = (u32)clk_get_rate(tc.clk[0]); -+ for (i = 0; i < 5; i++) { -+ unsigned int divisor = atmel_tc_divisors[i]; -+ unsigned int tmp; -+ -+ if (!divisor) -+ continue; -+ -+ tmp = rate / divisor; -+ pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp); -+ if (best_divisor_idx > 0) { -+ if (tmp < 5 * 1000 * 1000) -+ continue; -+ } -+ divided_rate = tmp; -+ best_divisor_idx = i; -+ } -+ -+ if (tc.bits == 32) { -+ tc.clksrc.read = tc_get_cycles32; -+ tcb_setup_single_chan(&tc, best_divisor_idx); -+ tc_sched_clock = tc_sched_clock_read32; -+ snprintf(tc.name, sizeof(tc.name), "%s:%d", -+ kbasename(node->parent->full_name), tc.channels[0]); -+ } else { -+ tc.clk[1] = tcb_clk_get(node, tc.channels[1]); -+ if (IS_ERR(tc.clk[1])) -+ goto err_disable_t0; -+ -+ err = clk_prepare_enable(tc.clk[1]); -+ if (err) { -+ pr_debug("can't enable T1 clk\n"); -+ goto err_clk1; -+ } -+ tc.clksrc.read = tc_get_cycles, -+ tcb_setup_dual_chan(&tc, best_divisor_idx); -+ tc_sched_clock = tc_sched_clock_read; -+ snprintf(tc.name, sizeof(tc.name), "%s:%d,%d", -+ kbasename(node->parent->full_name), tc.channels[0], -+ tc.channels[1]); -+ } -+ -+ pr_debug("%s at %d.%03d MHz\n", tc.name, -+ divided_rate / 1000000, -+ ((divided_rate + 500000) % 1000000) / 1000); -+ -+ tc.clksrc.name = tc.name; -+ tc.clksrc.suspend = tc_clksrc_suspend; -+ tc.clksrc.resume = tc_clksrc_resume; -+ tc.clksrc.rating = 200; -+ tc.clksrc.mask = CLOCKSOURCE_MASK(32); -+ tc.clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; -+ -+ err = clocksource_register_hz(&tc.clksrc, divided_rate); -+ if (err) -+ goto err_disable_t1; -+ -+ sched_clock_register(tc_sched_clock, 32, divided_rate); -+ -+ tc.registered = true; -+ -+ /* Set up and register clockevents */ -+ tc.clkevt.name = tc.name; -+ tc.clkevt.cpumask = cpumask_of(0); -+ 
tc.clkevt.set_next_event = tcb_clkevt_next_event; -+ tc.clkevt.set_state_oneshot = tcb_clkevt_oneshot; -+ tc.clkevt.set_state_shutdown = tcb_clkevt_shutdown; -+ tc.clkevt.features = CLOCK_EVT_FEAT_ONESHOT; -+ tc.clkevt.rating = 125; -+ -+ clockevents_config_and_register(&tc.clkevt, divided_rate, 1, -+ BIT(tc.bits) - 1); -+ -+ return 0; -+ -+err_disable_t1: -+ if (tc.bits == 16) -+ clk_disable_unprepare(tc.clk[1]); -+ -+err_clk1: -+ if (tc.bits == 16) -+ clk_put(tc.clk[1]); -+ -+err_disable_t0: -+ clk_disable_unprepare(tc.clk[0]); -+ -+err_clk: -+ clk_put(tc.clk[0]); -+ -+ pr_err("%s: unable to register clocksource/clockevent\n", -+ tc.clksrc.name); -+ -+ return err; -+} -+ -+static int __init tcb_clksrc_init(struct device_node *node) -+{ -+ const struct of_device_id *match; -+ struct regmap *regmap; -+ void __iomem *tcb_base; -+ u32 channel; -+ int irq, err, chan1 = -1; -+ unsigned bits; -+ -+ if (tc.registered) -+ return -ENODEV; -+ -+ /* -+ * The regmap has to be used to access registers that are shared -+ * between channels on the same TCB but we keep direct IO access for -+ * the counters to avoid the impact on performance -+ */ -+ regmap = syscon_node_to_regmap(node->parent); -+ if (IS_ERR(regmap)) -+ return PTR_ERR(regmap); -+ -+ tcb_base = of_iomap(node->parent, 0); -+ if (!tcb_base) { -+ pr_err("%s +%d %s\n", __FILE__, __LINE__, __func__); -+ return -ENXIO; -+ } -+ -+ match = of_match_node(atmel_tcb_dt_ids, node->parent); -+ bits = (uintptr_t)match->data; -+ -+ err = of_property_read_u32_index(node, "reg", 0, &channel); -+ if (err) -+ return err; -+ -+ irq = of_irq_get(node->parent, channel); -+ if (irq < 0) { -+ irq = of_irq_get(node->parent, 0); -+ if (irq < 0) -+ return irq; -+ } -+ -+ if (bits == 16) { -+ of_property_read_u32_index(node, "reg", 1, &chan1); -+ if (chan1 == -1) { -+ pr_err("%s: clocksource needs two channels\n", -+ node->parent->full_name); -+ return -EINVAL; -+ } -+ } -+ -+ return tcb_clksrc_register(node, regmap, tcb_base, channel, chan1, irq, -+ bits); -+} -+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch b/kernel/patches-4.19.x-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch deleted file mode 100644 index 9b9e0e7f6..000000000 --- a/kernel/patches-4.19.x-rt/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch +++ /dev/null @@ -1,270 +0,0 @@ -From f2e0ea85054574af7f632ca36991c5c1a25a7bfd Mon Sep 17 00:00:00 2001 -From: Alexandre Belloni -Date: Thu, 13 Sep 2018 13:30:20 +0200 -Subject: [PATCH 003/328] clocksource/drivers: timer-atmel-tcb: add clockevent - device on separate channel - -Add an other clockevent device that uses a separate TCB channel when -available. 
- -Signed-off-by: Alexandre Belloni -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/clocksource/timer-atmel-tcb.c | 217 +++++++++++++++++++++++++- - 1 file changed, 212 insertions(+), 5 deletions(-) - -diff --git a/drivers/clocksource/timer-atmel-tcb.c b/drivers/clocksource/timer-atmel-tcb.c -index 21fbe430f91b..63ce3b69338a 100644 ---- a/drivers/clocksource/timer-atmel-tcb.c -+++ b/drivers/clocksource/timer-atmel-tcb.c -@@ -32,7 +32,7 @@ struct atmel_tcb_clksrc { - bool clk_enabled; - }; - --static struct atmel_tcb_clksrc tc; -+static struct atmel_tcb_clksrc tc, tce; - - static struct clk *tcb_clk_get(struct device_node *node, int channel) - { -@@ -47,6 +47,203 @@ static struct clk *tcb_clk_get(struct device_node *node, int channel) - return of_clk_get_by_name(node->parent, "t0_clk"); - } - -+/* -+ * Clockevent device using its own channel -+ */ -+ -+static void tc_clkevt2_clk_disable(struct clock_event_device *d) -+{ -+ clk_disable(tce.clk[0]); -+ tce.clk_enabled = false; -+} -+ -+static void tc_clkevt2_clk_enable(struct clock_event_device *d) -+{ -+ if (tce.clk_enabled) -+ return; -+ clk_enable(tce.clk[0]); -+ tce.clk_enabled = true; -+} -+ -+static int tc_clkevt2_stop(struct clock_event_device *d) -+{ -+ writel(0xff, tce.base + ATMEL_TC_IDR(tce.channels[0])); -+ writel(ATMEL_TC_CCR_CLKDIS, tce.base + ATMEL_TC_CCR(tce.channels[0])); -+ -+ return 0; -+} -+ -+static int tc_clkevt2_shutdown(struct clock_event_device *d) -+{ -+ tc_clkevt2_stop(d); -+ if (!clockevent_state_detached(d)) -+ tc_clkevt2_clk_disable(d); -+ -+ return 0; -+} -+ -+/* For now, we always use the 32K clock ... this optimizes for NO_HZ, -+ * because using one of the divided clocks would usually mean the -+ * tick rate can never be less than several dozen Hz (vs 0.5 Hz). -+ * -+ * A divided clock could be good for high resolution timers, since -+ * 30.5 usec resolution can seem "low". -+ */ -+static int tc_clkevt2_set_oneshot(struct clock_event_device *d) -+{ -+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) -+ tc_clkevt2_stop(d); -+ -+ tc_clkevt2_clk_enable(d); -+ -+ /* slow clock, count up to RC, then irq and stop */ -+ writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_CPCSTOP | -+ ATMEL_TC_CMR_WAVE | ATMEL_TC_CMR_WAVESEL_UPRC, -+ tce.base + ATMEL_TC_CMR(tce.channels[0])); -+ writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0])); -+ -+ return 0; -+} -+ -+static int tc_clkevt2_set_periodic(struct clock_event_device *d) -+{ -+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) -+ tc_clkevt2_stop(d); -+ -+ /* By not making the gentime core emulate periodic mode on top -+ * of oneshot, we get lower overhead and improved accuracy. 
-+ */ -+ tc_clkevt2_clk_enable(d); -+ -+ /* slow clock, count up to RC, then irq and restart */ -+ writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_WAVE | -+ ATMEL_TC_CMR_WAVESEL_UPRC, -+ tce.base + ATMEL_TC_CMR(tce.channels[0])); -+ writel((32768 + HZ / 2) / HZ, tce.base + ATMEL_TC_RC(tce.channels[0])); -+ -+ /* Enable clock and interrupts on RC compare */ -+ writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0])); -+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG, -+ tce.base + ATMEL_TC_CCR(tce.channels[0])); -+ -+ return 0; -+} -+ -+static int tc_clkevt2_next_event(unsigned long delta, -+ struct clock_event_device *d) -+{ -+ writel(delta, tce.base + ATMEL_TC_RC(tce.channels[0])); -+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG, -+ tce.base + ATMEL_TC_CCR(tce.channels[0])); -+ -+ return 0; -+} -+ -+static irqreturn_t tc_clkevt2_irq(int irq, void *handle) -+{ -+ unsigned int sr; -+ -+ sr = readl(tce.base + ATMEL_TC_SR(tce.channels[0])); -+ if (sr & ATMEL_TC_CPCS) { -+ tce.clkevt.event_handler(&tce.clkevt); -+ return IRQ_HANDLED; -+ } -+ -+ return IRQ_NONE; -+} -+ -+static void tc_clkevt2_suspend(struct clock_event_device *d) -+{ -+ tce.cache[0].cmr = readl(tce.base + ATMEL_TC_CMR(tce.channels[0])); -+ tce.cache[0].imr = readl(tce.base + ATMEL_TC_IMR(tce.channels[0])); -+ tce.cache[0].rc = readl(tce.base + ATMEL_TC_RC(tce.channels[0])); -+ tce.cache[0].clken = !!(readl(tce.base + ATMEL_TC_SR(tce.channels[0])) & -+ ATMEL_TC_CLKSTA); -+} -+ -+static void tc_clkevt2_resume(struct clock_event_device *d) -+{ -+ /* Restore registers for the channel, RA and RB are not used */ -+ writel(tce.cache[0].cmr, tc.base + ATMEL_TC_CMR(tce.channels[0])); -+ writel(tce.cache[0].rc, tc.base + ATMEL_TC_RC(tce.channels[0])); -+ writel(0, tc.base + ATMEL_TC_RA(tce.channels[0])); -+ writel(0, tc.base + ATMEL_TC_RB(tce.channels[0])); -+ /* Disable all the interrupts */ -+ writel(0xff, tc.base + ATMEL_TC_IDR(tce.channels[0])); -+ /* Reenable interrupts that were enabled before suspending */ -+ writel(tce.cache[0].imr, tc.base + ATMEL_TC_IER(tce.channels[0])); -+ -+ /* Start the clock if it was used */ -+ if (tce.cache[0].clken) -+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG, -+ tc.base + ATMEL_TC_CCR(tce.channels[0])); -+} -+ -+static int __init tc_clkevt_register(struct device_node *node, -+ struct regmap *regmap, void __iomem *base, -+ int channel, int irq, int bits) -+{ -+ int ret; -+ struct clk *slow_clk; -+ -+ tce.regmap = regmap; -+ tce.base = base; -+ tce.channels[0] = channel; -+ tce.irq = irq; -+ -+ slow_clk = of_clk_get_by_name(node->parent, "slow_clk"); -+ if (IS_ERR(slow_clk)) -+ return PTR_ERR(slow_clk); -+ -+ ret = clk_prepare_enable(slow_clk); -+ if (ret) -+ return ret; -+ -+ tce.clk[0] = tcb_clk_get(node, tce.channels[0]); -+ if (IS_ERR(tce.clk[0])) { -+ ret = PTR_ERR(tce.clk[0]); -+ goto err_slow; -+ } -+ -+ snprintf(tce.name, sizeof(tce.name), "%s:%d", -+ kbasename(node->parent->full_name), channel); -+ tce.clkevt.cpumask = cpumask_of(0); -+ tce.clkevt.name = tce.name; -+ tce.clkevt.set_next_event = tc_clkevt2_next_event, -+ tce.clkevt.set_state_shutdown = tc_clkevt2_shutdown, -+ tce.clkevt.set_state_periodic = tc_clkevt2_set_periodic, -+ tce.clkevt.set_state_oneshot = tc_clkevt2_set_oneshot, -+ tce.clkevt.suspend = tc_clkevt2_suspend, -+ tce.clkevt.resume = tc_clkevt2_resume, -+ tce.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; -+ tce.clkevt.rating = 140; -+ -+ /* try to enable clk to avoid future errors in mode change */ -+ ret = clk_prepare_enable(tce.clk[0]); -+ 
if (ret) -+ goto err_slow; -+ clk_disable(tce.clk[0]); -+ -+ clockevents_config_and_register(&tce.clkevt, 32768, 1, -+ CLOCKSOURCE_MASK(bits)); -+ -+ ret = request_irq(tce.irq, tc_clkevt2_irq, IRQF_TIMER | IRQF_SHARED, -+ tce.clkevt.name, &tce); -+ if (ret) -+ goto err_clk; -+ -+ tce.registered = true; -+ -+ return 0; -+ -+err_clk: -+ clk_unprepare(tce.clk[0]); -+err_slow: -+ clk_disable_unprepare(slow_clk); -+ -+ return ret; -+} -+ - /* - * Clocksource and clockevent using the same channel(s) - */ -@@ -363,7 +560,7 @@ static int __init tcb_clksrc_init(struct device_node *node) - int irq, err, chan1 = -1; - unsigned bits; - -- if (tc.registered) -+ if (tc.registered && tce.registered) - return -ENODEV; - - /* -@@ -395,12 +592,22 @@ static int __init tcb_clksrc_init(struct device_node *node) - return irq; - } - -+ if (tc.registered) -+ return tc_clkevt_register(node, regmap, tcb_base, channel, irq, -+ bits); -+ - if (bits == 16) { - of_property_read_u32_index(node, "reg", 1, &chan1); - if (chan1 == -1) { -- pr_err("%s: clocksource needs two channels\n", -- node->parent->full_name); -- return -EINVAL; -+ if (tce.registered) { -+ pr_err("%s: clocksource needs two channels\n", -+ node->parent->full_name); -+ return -EINVAL; -+ } else { -+ return tc_clkevt_register(node, regmap, -+ tcb_base, channel, -+ irq, bits); -+ } - } - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch b/kernel/patches-4.19.x-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch deleted file mode 100644 index 68eae4c5a..000000000 --- a/kernel/patches-4.19.x-rt/0004-clocksource-drivers-atmel-pit-make-option-silent.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 23ef2fe8b6933933fb81af9decf35cfae8c14571 Mon Sep 17 00:00:00 2001 -From: Alexandre Belloni -Date: Thu, 13 Sep 2018 13:30:21 +0200 -Subject: [PATCH 004/328] clocksource/drivers: atmel-pit: make option silent - -To conform with the other option, make the ATMEL_PIT option silent so it -can be selected from the platform - -Tested-by: Alexander Dahl -Signed-off-by: Alexandre Belloni -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/clocksource/Kconfig | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig -index 0ab22e7037f4..34b07047b91f 100644 ---- a/drivers/clocksource/Kconfig -+++ b/drivers/clocksource/Kconfig -@@ -404,8 +404,11 @@ config ARMV7M_SYSTICK - This options enables support for the ARMv7M system timer unit - - config ATMEL_PIT -+ bool "Microchip ARM Periodic Interval Timer (PIT)" if COMPILE_TEST - select TIMER_OF if OF -- def_bool SOC_AT91SAM9 || SOC_SAMA5 -+ help -+ This enables build of clocksource and clockevent driver for -+ the integrated PIT in Microchip ARM SoCs. - - config ATMEL_ST - bool "Atmel ST timer support" if COMPILE_TEST --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0005-ARM-at91-Implement-clocksource-selection.patch b/kernel/patches-4.19.x-rt/0005-ARM-at91-Implement-clocksource-selection.patch deleted file mode 100644 index 704145406..000000000 --- a/kernel/patches-4.19.x-rt/0005-ARM-at91-Implement-clocksource-selection.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 56d1624c2b43a84717f237d3c2d58ac52cb37b33 Mon Sep 17 00:00:00 2001 -From: Alexandre Belloni -Date: Thu, 13 Sep 2018 13:30:22 +0200 -Subject: [PATCH 005/328] ARM: at91: Implement clocksource selection - -Allow selecting and unselecting the PIT clocksource driver so it doesn't -have to be compile when unused. 
- -Tested-by: Alexander Dahl -Signed-off-by: Alexandre Belloni -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/mach-at91/Kconfig | 25 +++++++++++++++++++++++++ - 1 file changed, 25 insertions(+) - -diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig -index 903f23c309df..fa493a86e2bb 100644 ---- a/arch/arm/mach-at91/Kconfig -+++ b/arch/arm/mach-at91/Kconfig -@@ -107,6 +107,31 @@ config SOC_AT91SAM9 - AT91SAM9X35 - AT91SAM9XE - -+comment "Clocksource driver selection" -+ -+config ATMEL_CLOCKSOURCE_PIT -+ bool "Periodic Interval Timer (PIT) support" -+ depends on SOC_AT91SAM9 || SOC_SAMA5 -+ default SOC_AT91SAM9 || SOC_SAMA5 -+ select ATMEL_PIT -+ help -+ Select this to get a clocksource based on the Atmel Periodic Interval -+ Timer. It has a relatively low resolution and the TC Block clocksource -+ should be preferred. -+ -+config ATMEL_CLOCKSOURCE_TCB -+ bool "Timer Counter Blocks (TCB) support" -+ depends on SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5 || COMPILE_TEST -+ default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5 -+ depends on !ATMEL_TCLIB -+ select ATMEL_ARM_TCB_CLKSRC -+ help -+ Select this to get a high precision clocksource based on a -+ TC block with a 5+ MHz base clock rate. -+ On platforms with 16-bit counters, two timer channels are combined -+ to make a single 32-bit timer. -+ It can also be used as a clock event device supporting oneshot mode. -+ - config HAVE_AT91_UTMI - bool - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch b/kernel/patches-4.19.x-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch deleted file mode 100644 index e34a6a737..000000000 --- a/kernel/patches-4.19.x-rt/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 9591e618026011c31f7275edd0643d390e185e38 Mon Sep 17 00:00:00 2001 -From: Alexandre Belloni -Date: Thu, 13 Sep 2018 13:30:23 +0200 -Subject: [PATCH 006/328] ARM: configs: at91: use new TCB timer driver - -Unselecting ATMEL_TCLIB switches the TCB timer driver from tcb_clksrc to -timer-atmel-tcb. 
- -Signed-off-by: Alexandre Belloni -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/configs/at91_dt_defconfig | 1 - - arch/arm/configs/sama5_defconfig | 1 - - 2 files changed, 2 deletions(-) - -diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig -index e4b1be66b3f5..09f262e59fef 100644 ---- a/arch/arm/configs/at91_dt_defconfig -+++ b/arch/arm/configs/at91_dt_defconfig -@@ -64,7 +64,6 @@ CONFIG_BLK_DEV_LOOP=y - CONFIG_BLK_DEV_RAM=y - CONFIG_BLK_DEV_RAM_COUNT=4 - CONFIG_BLK_DEV_RAM_SIZE=8192 --CONFIG_ATMEL_TCLIB=y - CONFIG_ATMEL_SSC=y - CONFIG_SCSI=y - CONFIG_BLK_DEV_SD=y -diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig -index 2080025556b5..f2bbc6339ca6 100644 ---- a/arch/arm/configs/sama5_defconfig -+++ b/arch/arm/configs/sama5_defconfig -@@ -75,7 +75,6 @@ CONFIG_BLK_DEV_LOOP=y - CONFIG_BLK_DEV_RAM=y - CONFIG_BLK_DEV_RAM_COUNT=4 - CONFIG_BLK_DEV_RAM_SIZE=8192 --CONFIG_ATMEL_TCLIB=y - CONFIG_ATMEL_SSC=y - CONFIG_EEPROM_AT24=y - CONFIG_SCSI=y --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0007-ARM-configs-at91-unselect-PIT.patch b/kernel/patches-4.19.x-rt/0007-ARM-configs-at91-unselect-PIT.patch deleted file mode 100644 index 94a0aaa1f..000000000 --- a/kernel/patches-4.19.x-rt/0007-ARM-configs-at91-unselect-PIT.patch +++ /dev/null @@ -1,43 +0,0 @@ -From f58179ebd23db67a287e5267a5cbc2c1ae5d75d9 Mon Sep 17 00:00:00 2001 -From: Alexandre Belloni -Date: Thu, 13 Sep 2018 13:30:24 +0200 -Subject: [PATCH 007/328] ARM: configs: at91: unselect PIT - -The PIT is not required anymore to successfully boot and may actually harm -in case preempt-rt is used because the PIT interrupt is shared. -Disable it so the TCB clocksource is used. - -Signed-off-by: Alexandre Belloni -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/configs/at91_dt_defconfig | 1 + - arch/arm/configs/sama5_defconfig | 1 + - 2 files changed, 2 insertions(+) - -diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig -index 09f262e59fef..f4b253bd05ed 100644 ---- a/arch/arm/configs/at91_dt_defconfig -+++ b/arch/arm/configs/at91_dt_defconfig -@@ -19,6 +19,7 @@ CONFIG_ARCH_MULTI_V5=y - CONFIG_ARCH_AT91=y - CONFIG_SOC_AT91RM9200=y - CONFIG_SOC_AT91SAM9=y -+# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set - CONFIG_AEABI=y - CONFIG_UACCESS_WITH_MEMCPY=y - CONFIG_ZBOOT_ROM_TEXT=0x0 -diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig -index f2bbc6339ca6..be92871ab155 100644 ---- a/arch/arm/configs/sama5_defconfig -+++ b/arch/arm/configs/sama5_defconfig -@@ -20,6 +20,7 @@ CONFIG_ARCH_AT91=y - CONFIG_SOC_SAMA5D2=y - CONFIG_SOC_SAMA5D3=y - CONFIG_SOC_SAMA5D4=y -+# CONFIG_ATMEL_CLOCKSOURCE_PIT is not set - CONFIG_AEABI=y - CONFIG_UACCESS_WITH_MEMCPY=y - CONFIG_ZBOOT_ROM_TEXT=0x0 --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch b/kernel/patches-4.19.x-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch deleted file mode 100644 index 855ce47d4..000000000 --- a/kernel/patches-4.19.x-rt/0008-irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch +++ /dev/null @@ -1,170 +0,0 @@ -From f5fc79f507ee8c22a6f18709552cecbada48d328 Mon Sep 17 00:00:00 2001 -From: Marc Zyngier -Date: Fri, 27 Jul 2018 13:38:54 +0100 -Subject: [PATCH 008/328] irqchip/gic-v3-its: Move pending table allocation to - init time - -Signed-off-by: Marc Zyngier -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/irqchip/irq-gic-v3-its.c | 80 
+++++++++++++++++++----------- - include/linux/irqchip/arm-gic-v3.h | 1 + - 2 files changed, 53 insertions(+), 28 deletions(-) - -diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c -index bf7b69449b43..f93b8cd5eea2 100644 ---- a/drivers/irqchip/irq-gic-v3-its.c -+++ b/drivers/irqchip/irq-gic-v3-its.c -@@ -179,6 +179,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); - static DEFINE_IDA(its_vpeid_ida); - - #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) -+#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) - #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) - #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) - -@@ -1659,7 +1660,7 @@ static void its_free_prop_table(struct page *prop_page) - get_order(LPI_PROPBASE_SZ)); - } - --static int __init its_alloc_lpi_tables(void) -+static int __init its_alloc_lpi_prop_table(void) - { - phys_addr_t paddr; - -@@ -2007,30 +2008,47 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base) - return val; - } - --static void its_cpu_init_lpis(void) -+static int __init allocate_lpi_tables(void) - { -- void __iomem *rbase = gic_data_rdist_rd_base(); -- struct page *pend_page; -- u64 val, tmp; -+ int err, cpu; - -- /* If we didn't allocate the pending table yet, do it now */ -- pend_page = gic_data_rdist()->pend_page; -- if (!pend_page) { -- phys_addr_t paddr; -+ err = its_alloc_lpi_prop_table(); -+ if (err) -+ return err; -+ -+ /* -+ * We allocate all the pending tables anyway, as we may have a -+ * mix of RDs that have had LPIs enabled, and some that -+ * don't. We'll free the unused ones as each CPU comes online. -+ */ -+ for_each_possible_cpu(cpu) { -+ struct page *pend_page; - - pend_page = its_allocate_pending_table(GFP_NOWAIT); - if (!pend_page) { -- pr_err("Failed to allocate PENDBASE for CPU%d\n", -- smp_processor_id()); -- return; -+ pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); -+ return -ENOMEM; - } - -- paddr = page_to_phys(pend_page); -- pr_info("CPU%d: using LPI pending table @%pa\n", -- smp_processor_id(), &paddr); -- gic_data_rdist()->pend_page = pend_page; -+ gic_data_rdist_cpu(cpu)->pend_page = pend_page; - } - -+ return 0; -+} -+ -+static void its_cpu_init_lpis(void) -+{ -+ void __iomem *rbase = gic_data_rdist_rd_base(); -+ struct page *pend_page; -+ phys_addr_t paddr; -+ u64 val, tmp; -+ -+ if (gic_data_rdist()->lpi_enabled) -+ return; -+ -+ pend_page = gic_data_rdist()->pend_page; -+ paddr = page_to_phys(pend_page); -+ - /* set PROPBASE */ - val = (page_to_phys(gic_rdists->prop_page) | - GICR_PROPBASER_InnerShareable | -@@ -2106,6 +2124,10 @@ static void its_cpu_init_lpis(void) - - /* Make sure the GIC has seen the above */ - dsb(sy); -+ gic_data_rdist()->lpi_enabled = true; -+ pr_info("GICv3: CPU%d: using LPI pending table @%pa\n", -+ smp_processor_id(), -+ &paddr); - } - - static void its_cpu_init_collection(struct its_node *its) -@@ -3585,16 +3607,6 @@ static int redist_disable_lpis(void) - u64 timeout = USEC_PER_SEC; - u64 val; - -- /* -- * If coming via a CPU hotplug event, we don't need to disable -- * LPIs before trying to re-enable them. They are already -- * configured and all is well in the world. Detect this case -- * by checking the allocation of the pending table for the -- * current CPU. 
-- */ -- if (gic_data_rdist()->pend_page) -- return 0; -- - if (!gic_rdists_supports_plpis()) { - pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); - return -ENXIO; -@@ -3604,7 +3616,18 @@ static int redist_disable_lpis(void) - if (!(val & GICR_CTLR_ENABLE_LPIS)) - return 0; - -- pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n", -+ /* -+ * If coming via a CPU hotplug event, we don't need to disable -+ * LPIs before trying to re-enable them. They are already -+ * configured and all is well in the world. -+ */ -+ if (gic_data_rdist()->lpi_enabled) -+ return 0; -+ -+ /* -+ * From that point on, we only try to do some damage control. -+ */ -+ pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", - smp_processor_id()); - add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); - -@@ -3860,7 +3883,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, - } - - gic_rdists = rdists; -- err = its_alloc_lpi_tables(); -+ -+ err = allocate_lpi_tables(); - if (err) - return err; - -diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h -index 1d21e98d6854..fdddead7e307 100644 ---- a/include/linux/irqchip/arm-gic-v3.h -+++ b/include/linux/irqchip/arm-gic-v3.h -@@ -585,6 +585,7 @@ struct rdists { - void __iomem *rd_base; - struct page *pend_page; - phys_addr_t phys_base; -+ bool lpi_enabled; - } __percpu *rdist; - struct page *prop_page; - u64 flags; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch b/kernel/patches-4.19.x-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch deleted file mode 100644 index 971d152da..000000000 --- a/kernel/patches-4.19.x-rt/0009-kthread-convert-worker-lock-to-raw-spinlock.patch +++ /dev/null @@ -1,202 +0,0 @@ -From 89b3ba99261e5321ba2276305f817b5c0c9817ad Mon Sep 17 00:00:00 2001 -From: Julia Cartwright -Date: Fri, 28 Sep 2018 21:03:51 +0000 -Subject: [PATCH 009/328] kthread: convert worker lock to raw spinlock - -In order to enable the queuing of kthread work items from hardirq -context even when PREEMPT_RT_FULL is enabled, convert the worker -spin_lock to a raw_spin_lock. - -This is only acceptable to do because the work performed under the lock -is well-bounded and minimal. 
- -Cc: Sebastian Andrzej Siewior -Cc: Guenter Roeck -Reported-and-tested-by: Steffen Trumtrar -Reported-by: Tim Sander -Signed-off-by: Julia Cartwright -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/kthread.h | 2 +- - kernel/kthread.c | 42 ++++++++++++++++++++--------------------- - 2 files changed, 22 insertions(+), 22 deletions(-) - -diff --git a/include/linux/kthread.h b/include/linux/kthread.h -index c1961761311d..ad292898f7f2 100644 ---- a/include/linux/kthread.h -+++ b/include/linux/kthread.h -@@ -85,7 +85,7 @@ enum { - - struct kthread_worker { - unsigned int flags; -- spinlock_t lock; -+ raw_spinlock_t lock; - struct list_head work_list; - struct list_head delayed_work_list; - struct task_struct *task; -diff --git a/kernel/kthread.c b/kernel/kthread.c -index 087d18d771b5..5641b55783a6 100644 ---- a/kernel/kthread.c -+++ b/kernel/kthread.c -@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthread_worker *worker, - struct lock_class_key *key) - { - memset(worker, 0, sizeof(struct kthread_worker)); -- spin_lock_init(&worker->lock); -+ raw_spin_lock_init(&worker->lock); - lockdep_set_class_and_name(&worker->lock, key, name); - INIT_LIST_HEAD(&worker->work_list); - INIT_LIST_HEAD(&worker->delayed_work_list); -@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr) - - if (kthread_should_stop()) { - __set_current_state(TASK_RUNNING); -- spin_lock_irq(&worker->lock); -+ raw_spin_lock_irq(&worker->lock); - worker->task = NULL; -- spin_unlock_irq(&worker->lock); -+ raw_spin_unlock_irq(&worker->lock); - return 0; - } - - work = NULL; -- spin_lock_irq(&worker->lock); -+ raw_spin_lock_irq(&worker->lock); - if (!list_empty(&worker->work_list)) { - work = list_first_entry(&worker->work_list, - struct kthread_work, node); - list_del_init(&work->node); - } - worker->current_work = work; -- spin_unlock_irq(&worker->lock); -+ raw_spin_unlock_irq(&worker->lock); - - if (work) { - __set_current_state(TASK_RUNNING); -@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_worker *worker, - bool ret = false; - unsigned long flags; - -- spin_lock_irqsave(&worker->lock, flags); -+ raw_spin_lock_irqsave(&worker->lock, flags); - if (!queuing_blocked(worker, work)) { - kthread_insert_work(worker, work, &worker->work_list); - ret = true; - } -- spin_unlock_irqrestore(&worker->lock, flags); -+ raw_spin_unlock_irqrestore(&worker->lock, flags); - return ret; - } - EXPORT_SYMBOL_GPL(kthread_queue_work); -@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t) - if (WARN_ON_ONCE(!worker)) - return; - -- spin_lock(&worker->lock); -+ raw_spin_lock(&worker->lock); - /* Work must not be used with >1 worker, see kthread_queue_work(). 
*/ - WARN_ON_ONCE(work->worker != worker); - -@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t) - list_del_init(&work->node); - kthread_insert_work(worker, work, &worker->work_list); - -- spin_unlock(&worker->lock); -+ raw_spin_unlock(&worker->lock); - } - EXPORT_SYMBOL(kthread_delayed_work_timer_fn); - -@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker, - unsigned long flags; - bool ret = false; - -- spin_lock_irqsave(&worker->lock, flags); -+ raw_spin_lock_irqsave(&worker->lock, flags); - - if (!queuing_blocked(worker, work)) { - __kthread_queue_delayed_work(worker, dwork, delay); - ret = true; - } - -- spin_unlock_irqrestore(&worker->lock, flags); -+ raw_spin_unlock_irqrestore(&worker->lock, flags); - return ret; - } - EXPORT_SYMBOL_GPL(kthread_queue_delayed_work); -@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_work *work) - if (!worker) - return; - -- spin_lock_irq(&worker->lock); -+ raw_spin_lock_irq(&worker->lock); - /* Work must not be used with >1 worker, see kthread_queue_work(). */ - WARN_ON_ONCE(work->worker != worker); - -@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_work *work) - else - noop = true; - -- spin_unlock_irq(&worker->lock); -+ raw_spin_unlock_irq(&worker->lock); - - if (!noop) - wait_for_completion(&fwork.done); -@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork, - * any queuing is blocked by setting the canceling counter. - */ - work->canceling++; -- spin_unlock_irqrestore(&worker->lock, *flags); -+ raw_spin_unlock_irqrestore(&worker->lock, *flags); - del_timer_sync(&dwork->timer); -- spin_lock_irqsave(&worker->lock, *flags); -+ raw_spin_lock_irqsave(&worker->lock, *flags); - work->canceling--; - } - -@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker, - unsigned long flags; - int ret = false; - -- spin_lock_irqsave(&worker->lock, flags); -+ raw_spin_lock_irqsave(&worker->lock, flags); - - /* Do not bother with canceling when never queued. */ - if (!work->worker) -@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker, - fast_queue: - __kthread_queue_delayed_work(worker, dwork, delay); - out: -- spin_unlock_irqrestore(&worker->lock, flags); -+ raw_spin_unlock_irqrestore(&worker->lock, flags); - return ret; - } - EXPORT_SYMBOL_GPL(kthread_mod_delayed_work); -@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) - if (!worker) - goto out; - -- spin_lock_irqsave(&worker->lock, flags); -+ raw_spin_lock_irqsave(&worker->lock, flags); - /* Work must not be used with >1 worker, see kthread_queue_work(). */ - WARN_ON_ONCE(work->worker != worker); - -@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) - * In the meantime, block any queuing by setting the canceling counter. 
- */ - work->canceling++; -- spin_unlock_irqrestore(&worker->lock, flags); -+ raw_spin_unlock_irqrestore(&worker->lock, flags); - kthread_flush_work(work); -- spin_lock_irqsave(&worker->lock, flags); -+ raw_spin_lock_irqsave(&worker->lock, flags); - work->canceling--; - - out_fast: -- spin_unlock_irqrestore(&worker->lock, flags); -+ raw_spin_unlock_irqrestore(&worker->lock, flags); - out: - return ret; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch b/kernel/patches-4.19.x-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch deleted file mode 100644 index aa23d75dc..000000000 --- a/kernel/patches-4.19.x-rt/0010-crypto-caam-qi-simplify-CGR-allocation-freeing.patch +++ /dev/null @@ -1,139 +0,0 @@ -From 1e7f9f15b5cb5088ac28a0919a2fcc74bfc5f5c7 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Horia=20Geant=C4=83?= -Date: Mon, 8 Oct 2018 14:09:37 +0300 -Subject: [PATCH 010/328] crypto: caam/qi - simplify CGR allocation, freeing -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -[Upstream commit 29e83c757006fd751966bdc53392bb22d74179c6] - -CGRs (Congestion Groups) have to be freed by the same CPU that -initialized them. -This is why currently the driver takes special measures; however, using -set_cpus_allowed_ptr() is incorrect - as reported by Sebastian. - -Instead of the generic solution of replacing set_cpus_allowed_ptr() with -work_on_cpu_safe(), we use the qman_delete_cgr_safe() QBMan API instead -of qman_delete_cgr() - which internally takes care of proper CGR -deletion. - -Link: https://lkml.kernel.org/r/20181005125443.dfhd2asqktm22ney@linutronix.de -Reported-by: Sebastian Andrzej Siewior -Signed-off-by: Horia Geantă -Signed-off-by: Herbert Xu ---- - drivers/crypto/caam/qi.c | 43 ++++------------------------------------ - drivers/crypto/caam/qi.h | 2 +- - 2 files changed, 5 insertions(+), 40 deletions(-) - -diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c -index 67f7f8c42c93..b84e6c8b1e13 100644 ---- a/drivers/crypto/caam/qi.c -+++ b/drivers/crypto/caam/qi.c -@@ -83,13 +83,6 @@ EXPORT_SYMBOL(caam_congested); - static u64 times_congested; - #endif - --/* -- * CPU from where the module initialised. This is required because QMan driver -- * requires CGRs to be removed from same CPU from where they were originally -- * allocated. -- */ --static int mod_init_cpu; -- - /* - * This is a a cache of buffers, from which the users of CAAM QI driver - * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than -@@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx) - } - EXPORT_SYMBOL(caam_drv_ctx_rel); - --int caam_qi_shutdown(struct device *qidev) -+void caam_qi_shutdown(struct device *qidev) - { -- int i, ret; -+ int i; - struct caam_qi_priv *priv = dev_get_drvdata(qidev); - const cpumask_t *cpus = qman_affine_cpus(); -- struct cpumask old_cpumask = current->cpus_allowed; - - for_each_cpu(i, cpus) { - struct napi_struct *irqtask; -@@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev) - dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); - } - -- /* -- * QMan driver requires CGRs to be deleted from same CPU from where they -- * were instantiated. Hence we get the module removal execute from the -- * same CPU from where it was originally inserted. 
-- */ -- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu)); -- -- ret = qman_delete_cgr(&priv->cgr); -- if (ret) -- dev_err(qidev, "Deletion of CGR failed: %d\n", ret); -- else -- qman_release_cgrid(priv->cgr.cgrid); -+ qman_delete_cgr_safe(&priv->cgr); -+ qman_release_cgrid(priv->cgr.cgrid); - - kmem_cache_destroy(qi_cache); - -- /* Now that we're done with the CGRs, restore the cpus allowed mask */ -- set_cpus_allowed_ptr(current, &old_cpumask); -- - platform_device_unregister(priv->qi_pdev); -- return ret; - } - - static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) -@@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev) - struct device *ctrldev = &caam_pdev->dev, *qidev; - struct caam_drv_private *ctrlpriv; - const cpumask_t *cpus = qman_affine_cpus(); -- struct cpumask old_cpumask = current->cpus_allowed; - static struct platform_device_info qi_pdev_info = { - .name = "caam_qi", - .id = PLATFORM_DEVID_NONE - }; - -- /* -- * QMAN requires CGRs to be removed from same CPU+portal from where it -- * was originally allocated. Hence we need to note down the -- * initialisation CPU and use the same CPU for module exit. -- * We select the first CPU to from the list of portal owning CPUs. -- * Then we pin module init to this CPU. -- */ -- mod_init_cpu = cpumask_first(cpus); -- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu)); -- - qi_pdev_info.parent = ctrldev; - qi_pdev_info.dma_mask = dma_get_mask(ctrldev); - qi_pdev = platform_device_register_full(&qi_pdev_info); -@@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev) - return -ENOMEM; - } - -- /* Done with the CGRs; restore the cpus allowed mask */ -- set_cpus_allowed_ptr(current, &old_cpumask); - #ifdef CONFIG_DEBUG_FS - debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, - ×_congested, &caam_fops_u64_ro); -diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h -index 357b69f57072..b6c8acc30853 100644 ---- a/drivers/crypto/caam/qi.h -+++ b/drivers/crypto/caam/qi.h -@@ -174,7 +174,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc); - void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx); - - int caam_qi_init(struct platform_device *pdev); --int caam_qi_shutdown(struct device *dev); -+void caam_qi_shutdown(struct device *dev); - - /** - * qi_cache_alloc - Allocate buffers from CAAM-QI cache --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch b/kernel/patches-4.19.x-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch deleted file mode 100644 index e25fabcbe..000000000 --- a/kernel/patches-4.19.x-rt/0011-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch +++ /dev/null @@ -1,147 +0,0 @@ -From 2a9fed89a7bea6fbe31e717ab5f277405e20826e Mon Sep 17 00:00:00 2001 -From: Peter Zijlstra -Date: Mon, 7 Jan 2019 13:52:31 +0100 -Subject: [PATCH 011/328] sched/fair: Robustify CFS-bandwidth timer locking - -Traditionally hrtimer callbacks were run with IRQs disabled, but with -the introduction of HRTIMER_MODE_SOFT it is possible they run from -SoftIRQ context, which does _NOT_ have IRQs disabled. - -Allow for the CFS bandwidth timers (period_timer and slack_timer) to -be ran from SoftIRQ context; this entails removing the assumption that -IRQs are already disabled from the locking. - -While mainline doesn't strictly need this, -RT forces all timers not -explicitly marked with MODE_HARD into MODE_SOFT and trips over this. 
-And marking these timers as MODE_HARD doesn't make sense as they're -not required for RT operation and can potentially be quite expensive. - -Cc: Ingo Molnar -Cc: Thomas Gleixner -Cc: Sebastian Andrzej Siewior -Reported-by: Tom Putzeys -Tested-by: Mike Galbraith -Signed-off-by: Peter Zijlstra (Intel) -Link: https://lkml.kernel.org/r/20190107125231.GE14122@hirez.programming.kicks-ass.net -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/fair.c | 30 ++++++++++++++++-------------- - 1 file changed, 16 insertions(+), 14 deletions(-) - -diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 7f4f4ab5bfef..0f1ba3d72336 100644 ---- a/kernel/sched/fair.c -+++ b/kernel/sched/fair.c -@@ -4576,7 +4576,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) - struct rq *rq = rq_of(cfs_rq); - struct rq_flags rf; - -- rq_lock(rq, &rf); -+ rq_lock_irqsave(rq, &rf); - if (!cfs_rq_throttled(cfs_rq)) - goto next; - -@@ -4595,7 +4595,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) - unthrottle_cfs_rq(cfs_rq); - - next: -- rq_unlock(rq, &rf); -+ rq_unlock_irqrestore(rq, &rf); - - if (!remaining) - break; -@@ -4611,7 +4611,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) - * period the timer is deactivated until scheduling resumes; cfs_b->idle is - * used to track this state. - */ --static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) -+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) - { - u64 runtime; - int throttled; -@@ -4651,10 +4651,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) - while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { - runtime = cfs_b->runtime; - cfs_b->distribute_running = 1; -- raw_spin_unlock(&cfs_b->lock); -+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags); - /* we can't nest cfs_b->lock while distributing bandwidth */ - runtime = distribute_cfs_runtime(cfs_b, runtime); -- raw_spin_lock(&cfs_b->lock); -+ raw_spin_lock_irqsave(&cfs_b->lock, flags); - - cfs_b->distribute_running = 0; - throttled = !list_empty(&cfs_b->throttled_cfs_rq); -@@ -4762,16 +4762,17 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) - static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) - { - u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); -+ unsigned long flags; - - /* confirm we're still not at a refresh boundary */ -- raw_spin_lock(&cfs_b->lock); -+ raw_spin_lock_irqsave(&cfs_b->lock, flags); - if (cfs_b->distribute_running) { -- raw_spin_unlock(&cfs_b->lock); -+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags); - return; - } - - if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { -- raw_spin_unlock(&cfs_b->lock); -+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags); - return; - } - -@@ -4781,17 +4782,17 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) - if (runtime) - cfs_b->distribute_running = 1; - -- raw_spin_unlock(&cfs_b->lock); -+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags); - - if (!runtime) - return; - - runtime = distribute_cfs_runtime(cfs_b, runtime); - -- raw_spin_lock(&cfs_b->lock); -+ raw_spin_lock_irqsave(&cfs_b->lock, flags); - cfs_b->runtime -= min(runtime, cfs_b->runtime); - cfs_b->distribute_running = 0; -- raw_spin_unlock(&cfs_b->lock); -+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags); - } - - /* -@@ -4871,11 +4872,12 @@ static enum hrtimer_restart 
sched_cfs_period_timer(struct hrtimer *timer) - { - struct cfs_bandwidth *cfs_b = - container_of(timer, struct cfs_bandwidth, period_timer); -+ unsigned long flags; - int overrun; - int idle = 0; - int count = 0; - -- raw_spin_lock(&cfs_b->lock); -+ raw_spin_lock_irqsave(&cfs_b->lock, flags); - for (;;) { - overrun = hrtimer_forward_now(timer, cfs_b->period); - if (!overrun) -@@ -4911,11 +4913,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) - count = 0; - } - -- idle = do_sched_cfs_period_timer(cfs_b, overrun); -+ idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); - } - if (idle) - cfs_b->period_active = 0; -- raw_spin_unlock(&cfs_b->lock); -+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags); - - return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch b/kernel/patches-4.19.x-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch deleted file mode 100644 index 1bcf9b842..000000000 --- a/kernel/patches-4.19.x-rt/0012-arm-Convert-arm-boot_lock-to-raw.patch +++ /dev/null @@ -1,431 +0,0 @@ -From 7c89d978bdfea369853567288ced4880deddd0b1 Mon Sep 17 00:00:00 2001 -From: Frank Rowand -Date: Mon, 19 Sep 2011 14:51:14 -0700 -Subject: [PATCH 012/328] arm: Convert arm boot_lock to raw - -The arm boot_lock is used by the secondary processor startup code. The locking -task is the idle thread, which has idle->sched_class == &idle_sched_class. -idle_sched_class->enqueue_task == NULL, so if the idle task blocks on the -lock, the attempt to wake it when the lock becomes available will fail: - -try_to_wake_up() - ... - activate_task() - enqueue_task() - p->sched_class->enqueue_task(rq, p, flags) - -Fix by converting boot_lock to a raw spin lock. - -Signed-off-by: Frank Rowand -Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com -Signed-off-by: Thomas Gleixner -Tested-by: Tony Lindgren -Acked-by: Krzysztof Kozlowski -Tested-by: Krzysztof Kozlowski [Exynos5422 Linaro PM-QA] -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/mach-exynos/platsmp.c | 12 ++++++------ - arch/arm/mach-hisi/platmcpm.c | 22 +++++++++++----------- - arch/arm/mach-omap2/omap-smp.c | 10 +++++----- - arch/arm/mach-prima2/platsmp.c | 10 +++++----- - arch/arm/mach-qcom/platsmp.c | 10 +++++----- - arch/arm/mach-spear/platsmp.c | 10 +++++----- - arch/arm/mach-sti/platsmp.c | 10 +++++----- - arch/arm/plat-versatile/platsmp.c | 10 +++++----- - 8 files changed, 47 insertions(+), 47 deletions(-) - -diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c -index 6a1e682371b3..17dca0ff336e 100644 ---- a/arch/arm/mach-exynos/platsmp.c -+++ b/arch/arm/mach-exynos/platsmp.c -@@ -239,7 +239,7 @@ static void write_pen_release(int val) - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void exynos_secondary_init(unsigned int cpu) - { -@@ -252,8 +252,8 @@ static void exynos_secondary_init(unsigned int cpu) - /* - * Synchronise with the boot thread. 
- */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr) -@@ -317,7 +317,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -344,7 +344,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) - - if (timeout == 0) { - printk(KERN_ERR "cpu1 power enable failed"); -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - return -ETIMEDOUT; - } - } -@@ -390,7 +390,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) - * calibrations, then wait for it to finish - */ - fail: -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? ret : 0; - } -diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c -index f66815c3dd07..00524abd963f 100644 ---- a/arch/arm/mach-hisi/platmcpm.c -+++ b/arch/arm/mach-hisi/platmcpm.c -@@ -61,7 +61,7 @@ - - static void __iomem *sysctrl, *fabric; - static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER]; --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - static u32 fabric_phys_addr; - /* - * [0]: bootwrapper physical address -@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle) - if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) - return -EINVAL; - -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - - if (hip04_cpu_table[cluster][cpu]) - goto out; -@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle) - - out: - hip04_cpu_table[cluster][cpu]++; -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - - return 0; - } -@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu) - cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - hip04_cpu_table[cluster][cpu]--; - if (hip04_cpu_table[cluster][cpu] == 1) { - /* A power_up request went ahead of us. */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - return; - } else if (hip04_cpu_table[cluster][cpu] > 1) { - pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu); -@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu) - } - - last_man = hip04_cluster_is_down(cluster); -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - if (last_man) { - /* Since it's Cortex A15, disable L2 prefetching. */ - asm volatile( -@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu) - cpu >= HIP04_MAX_CPUS_PER_CLUSTER); - - count = TIMEOUT_MSEC / POLL_MSEC; -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - for (tries = 0; tries < count; tries++) { - if (hip04_cpu_table[cluster][cpu]) - goto err; -@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu) - data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); - if (data & CORE_WFI_STATUS(cpu)) - break; -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - /* Wait for clean L2 when the whole cluster is down. 
*/ - msleep(POLL_MSEC); -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - } - if (tries >= count) - goto err; -@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu) - goto err; - if (hip04_cluster_is_down(cluster)) - hip04_set_snoop_filter(cluster, 0); -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - return 1; - err: -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - return 0; - } - #endif -diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c -index 1c73694c871a..ac4d2f030b87 100644 ---- a/arch/arm/mach-omap2/omap-smp.c -+++ b/arch/arm/mach-omap2/omap-smp.c -@@ -69,7 +69,7 @@ static const struct omap_smp_config omap5_cfg __initconst = { - .startup_addr = omap5_secondary_startup, - }; - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - void __iomem *omap4_get_scu_base(void) - { -@@ -177,8 +177,8 @@ static void omap4_secondary_init(unsigned int cpu) - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -191,7 +191,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * Update the AuxCoreBoot0 with boot state for secondary core. -@@ -270,7 +270,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) - * Now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return 0; - } -diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c -index 75ef5d4be554..c17c86e5d860 100644 ---- a/arch/arm/mach-prima2/platsmp.c -+++ b/arch/arm/mach-prima2/platsmp.c -@@ -22,7 +22,7 @@ - - static void __iomem *clk_base; - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void sirfsoc_secondary_init(unsigned int cpu) - { -@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu) - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static const struct of_device_id clk_ids[] = { -@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) - /* make sure write buffer is drained */ - mb(); - -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? 
-ENOSYS : 0; - } -diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c -index 5494c9e0c909..e8ce157d3548 100644 ---- a/arch/arm/mach-qcom/platsmp.c -+++ b/arch/arm/mach-qcom/platsmp.c -@@ -46,7 +46,7 @@ - - extern void secondary_startup_arm(void); - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - #ifdef CONFIG_HOTPLUG_CPU - static void qcom_cpu_die(unsigned int cpu) -@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu) - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int scss_release_secondary(unsigned int cpu) -@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int)) - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * Send the secondary CPU a soft interrupt, thereby causing -@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int)) - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return ret; - } -diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c -index 39038a03836a..6da5c93872bf 100644 ---- a/arch/arm/mach-spear/platsmp.c -+++ b/arch/arm/mach-spear/platsmp.c -@@ -32,7 +32,7 @@ static void write_pen_release(int val) - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void __iomem *scu_base = IOMEM(VA_SCU_BASE); - -@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu) - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } -diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c -index 231f19e17436..a3419b7003e6 100644 ---- a/arch/arm/mach-sti/platsmp.c -+++ b/arch/arm/mach-sti/platsmp.c -@@ -35,7 +35,7 @@ static void write_pen_release(int val) - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void sti_secondary_init(unsigned int cpu) - { -@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu) - /* - * Synchronise with the boot thread. 
- */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } -diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c -index c2366510187a..6b60f582b738 100644 ---- a/arch/arm/plat-versatile/platsmp.c -+++ b/arch/arm/plat-versatile/platsmp.c -@@ -32,7 +32,7 @@ static void write_pen_release(int val) - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - void versatile_secondary_init(unsigned int cpu) - { -@@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu) - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * This is really belt and braces; we hold unintended secondary -@@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch b/kernel/patches-4.19.x-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch deleted file mode 100644 index d5caf7030..000000000 --- a/kernel/patches-4.19.x-rt/0013-x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 9ecaf2a8f433399cc3fabcfb9fbce9a88fe6f200 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 17 Jul 2018 18:25:31 +0200 -Subject: [PATCH 013/328] x86/ioapic: Don't let setaffinity unmask threaded EOI - interrupt too early - -There is an issue with threaded interrupts which are marked ONESHOT -and using the fasteoi handler. - - if (IS_ONESHOT()) - mask_irq(); - - .... - .... - - cond_unmask_eoi_irq() - chip->irq_eoi(); - -So if setaffinity is pending then the interrupt will be moved and then -unmasked, which is wrong as it should be kept masked up to the point where -the threaded handler finished. It's not a real problem, the interrupt will -just be able to fire before the threaded handler has finished, though the irq -masked state will be wrong for a bit. - -The patch below should cure the issue. It also renames the horribly -misnomed functions so it becomes clear what they are supposed to do. 
- -Signed-off-by: Thomas Gleixner -[bigeasy: add the body of the patch, use the same functions in both - ifdef paths (spotted by Andy Shevchenko)] -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/kernel/apic/io_apic.c | 16 ++++++++-------- - 1 file changed, 8 insertions(+), 8 deletions(-) - -diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c -index fa3b85b222e3..1bdad61a3ef7 100644 ---- a/arch/x86/kernel/apic/io_apic.c -+++ b/arch/x86/kernel/apic/io_apic.c -@@ -1722,7 +1722,7 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data) - return false; - } - --static inline bool ioapic_irqd_mask(struct irq_data *data) -+static inline bool ioapic_prepare_move(struct irq_data *data) - { - /* If we are moving the IRQ we need to mask it */ - if (unlikely(irqd_is_setaffinity_pending(data))) { -@@ -1733,9 +1733,9 @@ static inline bool ioapic_irqd_mask(struct irq_data *data) - return false; - } - --static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) -+static inline void ioapic_finish_move(struct irq_data *data, bool moveit) - { -- if (unlikely(masked)) { -+ if (unlikely(moveit)) { - /* Only migrate the irq if the ack has been received. - * - * On rare occasions the broadcast level triggered ack gets -@@ -1770,11 +1770,11 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) - } - } - #else --static inline bool ioapic_irqd_mask(struct irq_data *data) -+static inline bool ioapic_prepare_move(struct irq_data *data) - { - return false; - } --static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked) -+static inline void ioapic_finish_move(struct irq_data *data, bool moveit) - { - } - #endif -@@ -1783,11 +1783,11 @@ static void ioapic_ack_level(struct irq_data *irq_data) - { - struct irq_cfg *cfg = irqd_cfg(irq_data); - unsigned long v; -- bool masked; -+ bool moveit; - int i; - - irq_complete_move(cfg); -- masked = ioapic_irqd_mask(irq_data); -+ moveit = ioapic_prepare_move(irq_data); - - /* - * It appears there is an erratum which affects at least version 0x11 -@@ -1842,7 +1842,7 @@ static void ioapic_ack_level(struct irq_data *irq_data) - eoi_ioapic_pin(cfg->vector, irq_data->chip_data); - } - -- ioapic_irqd_unmask(irq_data, masked); -+ ioapic_finish_move(irq_data, moveit); - } - - static void ioapic_ir_ack_level(struct irq_data *irq_data) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/kernel/patches-4.19.x-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch deleted file mode 100644 index 3d2838508..000000000 --- a/kernel/patches-4.19.x-rt/0014-cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 759e6d7c318bbcff7507641d5a9fb6b5074b2a87 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 3 Jul 2018 18:19:48 +0200 -Subject: [PATCH 014/328] cgroup: use irqsave in cgroup_rstat_flush_locked() - -All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock -either with spin_lock_irq() or spin_lock_irqsave(). -cgroup_rstat_flush_locked() itself acquires cgroup_rstat_cpu_lock which -is a raw_spin_lock. This lock is also acquired in cgroup_rstat_updated() -in IRQ context and therefore requires _irqsave() locking suffix in -cgroup_rstat_flush_locked(). -Since there is no difference between spin_lock_t and raw_spin_lock_t -on !RT lockdep does not complain here. On RT lockdep complains because -the interrupts were not disabled here and a deadlock is possible. 
- -Acquire the raw_spin_lock_t with disabled interrupts. - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/cgroup/rstat.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c -index bb95a35e8c2d..3266a9781b4e 100644 ---- a/kernel/cgroup/rstat.c -+++ b/kernel/cgroup/rstat.c -@@ -159,8 +159,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep) - raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, - cpu); - struct cgroup *pos = NULL; -+ unsigned long flags; - -- raw_spin_lock(cpu_lock); -+ raw_spin_lock_irqsave(cpu_lock, flags); - while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) { - struct cgroup_subsys_state *css; - -@@ -172,7 +173,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep) - css->ss->css_rstat_flush(css, cpu); - rcu_read_unlock(); - } -- raw_spin_unlock(cpu_lock); -+ raw_spin_unlock_irqrestore(cpu_lock, flags); - - /* if @may_sleep, play nice and yield if necessary */ - if (may_sleep && (need_resched() || --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch b/kernel/patches-4.19.x-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch deleted file mode 100644 index f95c71d80..000000000 --- a/kernel/patches-4.19.x-rt/0015-fscache-initialize-cookie-hash-table-raw-spinlocks.patch +++ /dev/null @@ -1,63 +0,0 @@ -From 934128f28dd37073d6513a37f0433df6399c7953 Mon Sep 17 00:00:00 2001 -From: Clark Williams -Date: Tue, 3 Jul 2018 13:34:30 -0500 -Subject: [PATCH 015/328] fscache: initialize cookie hash table raw spinlocks - -The fscache cookie mechanism uses a hash table of hlist_bl_head structures. The -PREEMPT_RT patcheset adds a raw spinlock to this structure and so on PREEMPT_RT -the structures get used uninitialized, causing warnings about bad magic numbers -when spinlock debugging is turned on. - -Use the init function for fscache cookies. 
- -Signed-off-by: Clark Williams -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/fscache/cookie.c | 8 ++++++++ - fs/fscache/main.c | 1 + - include/linux/fscache.h | 1 + - 3 files changed, 10 insertions(+) - -diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c -index c550512ce335..d5d57da32ffa 100644 ---- a/fs/fscache/cookie.c -+++ b/fs/fscache/cookie.c -@@ -962,3 +962,11 @@ int __fscache_check_consistency(struct fscache_cookie *cookie, - return -ESTALE; - } - EXPORT_SYMBOL(__fscache_check_consistency); -+ -+void __init fscache_cookie_init(void) -+{ -+ int i; -+ -+ for (i = 0; i < (1 << fscache_cookie_hash_shift) - 1; i++) -+ INIT_HLIST_BL_HEAD(&fscache_cookie_hash[i]); -+} -diff --git a/fs/fscache/main.c b/fs/fscache/main.c -index 30ad89db1efc..1d5f1d679ffa 100644 ---- a/fs/fscache/main.c -+++ b/fs/fscache/main.c -@@ -149,6 +149,7 @@ static int __init fscache_init(void) - ret = -ENOMEM; - goto error_cookie_jar; - } -+ fscache_cookie_init(); - - fscache_root = kobject_create_and_add("fscache", kernel_kobj); - if (!fscache_root) -diff --git a/include/linux/fscache.h b/include/linux/fscache.h -index 84b90a79d75a..87a9330eafa2 100644 ---- a/include/linux/fscache.h -+++ b/include/linux/fscache.h -@@ -230,6 +230,7 @@ extern void __fscache_readpages_cancel(struct fscache_cookie *cookie, - extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool); - extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t, - bool (*)(void *), void *); -+extern void fscache_cookie_init(void); - - /** - * fscache_register_netfs - Register a filesystem as desiring caching services --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch b/kernel/patches-4.19.x-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch deleted file mode 100644 index 20e23203e..000000000 --- a/kernel/patches-4.19.x-rt/0016-Drivers-hv-vmbus-include-header-for-get_irq_regs.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 2a2f1a8c287a6b6fb14a4a1b5583e043d5897df4 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 29 Aug 2018 21:59:04 +0200 -Subject: [PATCH 016/328] Drivers: hv: vmbus: include header for get_irq_regs() -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -On !RT the header file get_irq_regs() gets pulled in via other header files. On -RT it does not and the build fails: - - drivers/hv/vmbus_drv.c:975 implicit declaration of function ‘get_irq_regs’ [-Werror=implicit-function-declaration] - drivers/hv/hv.c:115 implicit declaration of function ‘get_irq_regs’ [-Werror=implicit-function-declaration] - -Add the header file for get_irq_regs() in a common header so it used by -vmbus_drv.c by hv.c for their get_irq_regs() usage. 
- -Reported-by: Bernhard Landauer -Reported-by: Ralf Ramsauer -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/hv/hyperv_vmbus.h | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h -index 87d3d7da78f8..1d2d8a4b837d 100644 ---- a/drivers/hv/hyperv_vmbus.h -+++ b/drivers/hv/hyperv_vmbus.h -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - - #include "hv_trace.h" - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch b/kernel/patches-4.19.x-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch deleted file mode 100644 index 8d53fc488..000000000 --- a/kernel/patches-4.19.x-rt/0017-percpu-include-irqflags.h-for-raw_local_irq_save.patch +++ /dev/null @@ -1,32 +0,0 @@ -From d487edd01d698abf2b4f3ea4e3f27897b227250c Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 11 Oct 2018 16:39:59 +0200 -Subject: [PATCH 017/328] percpu: include irqflags.h for raw_local_irq_save() - -The header percpu.h header file is using raw_local_irq_save() but does -not include irqflags.h for its definition. It compiles because the -header file is included via an other header file. -On -RT the build fails because raw_local_irq_save() is not defined. - -Include irqflags.h in percpu.h. - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/asm-generic/percpu.h | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h -index 1817a8415a5e..942d64c0476e 100644 ---- a/include/asm-generic/percpu.h -+++ b/include/asm-generic/percpu.h -@@ -5,6 +5,7 @@ - #include - #include - #include -+#include - - #ifdef CONFIG_SMP - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0018-efi-Allow-efi-runtime.patch b/kernel/patches-4.19.x-rt/0018-efi-Allow-efi-runtime.patch deleted file mode 100644 index f2953cabf..000000000 --- a/kernel/patches-4.19.x-rt/0018-efi-Allow-efi-runtime.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 5c77a75aaa23c5fc32b5485897d0d14e66fafd37 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 26 Jul 2018 15:06:10 +0200 -Subject: [PATCH 018/328] efi: Allow efi=runtime - -In case the option "efi=noruntime" is default at built-time, the user -could overwrite its sate by `efi=runtime' and allow it again. 
- -Acked-by: Ard Biesheuvel -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/firmware/efi/efi.c | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c -index d54fca902e64..5db20908aa9c 100644 ---- a/drivers/firmware/efi/efi.c -+++ b/drivers/firmware/efi/efi.c -@@ -113,6 +113,9 @@ static int __init parse_efi_cmdline(char *str) - if (parse_option_str(str, "noruntime")) - disable_runtime = true; - -+ if (parse_option_str(str, "runtime")) -+ disable_runtime = false; -+ - return 0; - } - early_param("efi", parse_efi_cmdline); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch b/kernel/patches-4.19.x-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch deleted file mode 100644 index 1e792ebbc..000000000 --- a/kernel/patches-4.19.x-rt/0019-x86-efi-drop-task_lock-from-efi_switch_mm.patch +++ /dev/null @@ -1,54 +0,0 @@ -From af50891c552632469b09b7b97abd197545aec804 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 24 Jul 2018 14:48:55 +0200 -Subject: [PATCH 019/328] x86/efi: drop task_lock() from efi_switch_mm() - -efi_switch_mm() is a wrapper around switch_mm() which saves current's -->active_mm, sets the requests mm as ->active_mm and invokes -switch_mm(). -I don't think that task_lock() is required during that procedure. It -protects ->mm which isn't changed here. - -It needs to be mentioned that during the whole procedure (switch to -EFI's mm and back) the preemption needs to be disabled. A context switch -at this point would reset the cr3 value based on current->mm. Also, this -function may not be invoked at the same time on a different CPU because -it would overwrite the efi_scratch.prev_mm information. - -Remove task_lock() and also update the comment to reflect it. - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/platform/efi/efi_64.c | 10 ++++------ - 1 file changed, 4 insertions(+), 6 deletions(-) - -diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c -index 6db8f3598c80..c9ccaef8df57 100644 ---- a/arch/x86/platform/efi/efi_64.c -+++ b/arch/x86/platform/efi/efi_64.c -@@ -620,18 +620,16 @@ void __init efi_dump_pagetable(void) - - /* - * Makes the calling thread switch to/from efi_mm context. Can be used -- * for SetVirtualAddressMap() i.e. current->active_mm == init_mm as well -- * as during efi runtime calls i.e current->active_mm == current_mm. -- * We are not mm_dropping()/mm_grabbing() any mm, because we are not -- * losing/creating any references. -+ * in a kernel thread and user context. Preemption needs to remain disabled -+ * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm -+ * can not change under us. -+ * It should be ensured that there are no concurent calls to this function. 
- */ - void efi_switch_mm(struct mm_struct *mm) - { -- task_lock(current); - efi_scratch.prev_mm = current->active_mm; - current->active_mm = mm; - switch_mm(efi_scratch.prev_mm, mm, NULL); -- task_unlock(current); - } - - #ifdef CONFIG_EFI_MIXED --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch b/kernel/patches-4.19.x-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch deleted file mode 100644 index b47cd87bf..000000000 --- a/kernel/patches-4.19.x-rt/0020-arm64-KVM-compute_layout-before-altenates-are-applie.patch +++ /dev/null @@ -1,82 +0,0 @@ -From c96c598b9bc12e2909dcec0a1bf8f4a1b846107e Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 26 Jul 2018 09:13:42 +0200 -Subject: [PATCH 020/328] arm64: KVM: compute_layout before altenates are - applied - -compute_layout() is invoked as part of an alternative fixup under -stop_machine() and needs a sleeping lock as part of get_random_long(). - -Invoke compute_layout() before the alternatives are applied. - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm64/include/asm/alternative.h | 6 ++++++ - arch/arm64/kernel/alternative.c | 1 + - arch/arm64/kvm/va_layout.c | 7 +------ - 3 files changed, 8 insertions(+), 6 deletions(-) - -diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h -index 887a8512bf10..376561351bae 100644 ---- a/arch/arm64/include/asm/alternative.h -+++ b/arch/arm64/include/asm/alternative.h -@@ -35,6 +35,12 @@ void apply_alternatives_module(void *start, size_t length); - static inline void apply_alternatives_module(void *start, size_t length) { } - #endif - -+#ifdef CONFIG_KVM_ARM_HOST -+void kvm_compute_layout(void); -+#else -+static inline void kvm_compute_layout(void) { } -+#endif -+ - #define ALTINSTR_ENTRY(feature) \ - " .word 661b - .\n" /* label */ \ - " .word 663f - .\n" /* new instruction */ \ -diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c -index b5d603992d40..f92815d56d17 100644 ---- a/arch/arm64/kernel/alternative.c -+++ b/arch/arm64/kernel/alternative.c -@@ -224,6 +224,7 @@ static int __apply_alternatives_multi_stop(void *unused) - void __init apply_alternatives_all(void) - { - /* better not try code patching on a live SMP system */ -+ kvm_compute_layout(); - stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); - } - -diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c -index c712a7376bc1..792da0e125de 100644 ---- a/arch/arm64/kvm/va_layout.c -+++ b/arch/arm64/kvm/va_layout.c -@@ -33,7 +33,7 @@ static u8 tag_lsb; - static u64 tag_val; - static u64 va_mask; - --static void compute_layout(void) -+__init void kvm_compute_layout(void) - { - phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start); - u64 hyp_va_msb; -@@ -121,8 +121,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt, - - BUG_ON(nr_inst != 5); - -- if (!has_vhe() && !va_mask) -- compute_layout(); - - for (i = 0; i < nr_inst; i++) { - u32 rd, rn, insn, oinsn; -@@ -167,9 +165,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt, - return; - } - -- if (!va_mask) -- compute_layout(); -- - /* - * Compute HYP VA by using the same computation as kern_hyp_va() - */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/kernel/patches-4.19.x-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch deleted file mode 100644 index 2776fca61..000000000 --- 
a/kernel/patches-4.19.x-rt/0021-of-allocate-free-phandle-cache-outside-of-the-devtre.patch +++ /dev/null @@ -1,102 +0,0 @@ -From 8779fdd5686d1f9be670c7ee5ea6dfaece9e37d8 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 31 Aug 2018 14:16:30 +0200 -Subject: [PATCH 021/328] of: allocate / free phandle cache outside of the - devtree_lock - -The phandle cache code allocates memory while holding devtree_lock which -is a raw_spinlock_t. Memory allocation (and free()) is not possible on -RT while a raw_spinlock_t is held. -Invoke the kfree() and kcalloc() while the lock is dropped. - -Cc: Rob Herring -Cc: Frank Rowand -Cc: devicetree@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/of/base.c | 19 +++++++++++++------ - 1 file changed, 13 insertions(+), 6 deletions(-) - -diff --git a/drivers/of/base.c b/drivers/of/base.c -index f0dbb7ad88cf..c59b30bab0e0 100644 ---- a/drivers/of/base.c -+++ b/drivers/of/base.c -@@ -130,31 +130,34 @@ static u32 phandle_cache_mask; - /* - * Caller must hold devtree_lock. - */ --static void __of_free_phandle_cache(void) -+static struct device_node** __of_free_phandle_cache(void) - { - u32 cache_entries = phandle_cache_mask + 1; - u32 k; -+ struct device_node **shadow; - - if (!phandle_cache) -- return; -+ return NULL; - - for (k = 0; k < cache_entries; k++) - of_node_put(phandle_cache[k]); - -- kfree(phandle_cache); -+ shadow = phandle_cache; - phandle_cache = NULL; -+ return shadow; - } - - int of_free_phandle_cache(void) - { - unsigned long flags; -+ struct device_node **shadow; - - raw_spin_lock_irqsave(&devtree_lock, flags); - -- __of_free_phandle_cache(); -+ shadow = __of_free_phandle_cache(); - - raw_spin_unlock_irqrestore(&devtree_lock, flags); -- -+ kfree(shadow); - return 0; - } - #if !defined(CONFIG_MODULES) -@@ -189,10 +192,11 @@ void of_populate_phandle_cache(void) - u32 cache_entries; - struct device_node *np; - u32 phandles = 0; -+ struct device_node **shadow; - - raw_spin_lock_irqsave(&devtree_lock, flags); - -- __of_free_phandle_cache(); -+ shadow = __of_free_phandle_cache(); - - for_each_of_allnodes(np) - if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) -@@ -200,12 +204,14 @@ void of_populate_phandle_cache(void) - - if (!phandles) - goto out; -+ raw_spin_unlock_irqrestore(&devtree_lock, flags); - - cache_entries = roundup_pow_of_two(phandles); - phandle_cache_mask = cache_entries - 1; - - phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache), - GFP_ATOMIC); -+ raw_spin_lock_irqsave(&devtree_lock, flags); - if (!phandle_cache) - goto out; - -@@ -217,6 +223,7 @@ void of_populate_phandle_cache(void) - - out: - raw_spin_unlock_irqrestore(&devtree_lock, flags); -+ kfree(shadow); - } - - void __init of_core_init(void) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch b/kernel/patches-4.19.x-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch deleted file mode 100644 index 32dadfc61..000000000 --- a/kernel/patches-4.19.x-rt/0022-mm-kasan-make-quarantine_lock-a-raw_spinlock_t.patch +++ /dev/null @@ -1,97 +0,0 @@ -From 7841950d4460ea93ee4ddd6a400ad67cfacee592 Mon Sep 17 00:00:00 2001 -From: Clark Williams -Date: Tue, 18 Sep 2018 10:29:31 -0500 -Subject: [PATCH 022/328] mm/kasan: make quarantine_lock a raw_spinlock_t - -The static lock quarantine_lock is used in quarantine.c to protect the -quarantine queue datastructures. 
It is taken inside quarantine queue -manipulation routines (quarantine_put(), quarantine_reduce() and -quarantine_remove_cache()), with IRQs disabled. -This is not a problem on a stock kernel but is problematic on an RT -kernel where spin locks are sleeping spinlocks, which can sleep and can -not be acquired with disabled interrupts. - -Convert the quarantine_lock to a raw spinlock_t. The usage of -quarantine_lock is confined to quarantine.c and the work performed while -the lock is held is limited. - -Signed-off-by: Clark Williams -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/kasan/quarantine.c | 18 +++++++++--------- - 1 file changed, 9 insertions(+), 9 deletions(-) - -diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c -index 3a8ddf8baf7d..b209dbaefde8 100644 ---- a/mm/kasan/quarantine.c -+++ b/mm/kasan/quarantine.c -@@ -103,7 +103,7 @@ static int quarantine_head; - static int quarantine_tail; - /* Total size of all objects in global_quarantine across all batches. */ - static unsigned long quarantine_size; --static DEFINE_SPINLOCK(quarantine_lock); -+static DEFINE_RAW_SPINLOCK(quarantine_lock); - DEFINE_STATIC_SRCU(remove_cache_srcu); - - /* Maximum size of the global queue. */ -@@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) - if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { - qlist_move_all(q, &temp); - -- spin_lock(&quarantine_lock); -+ raw_spin_lock(&quarantine_lock); - WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes); - qlist_move_all(&temp, &global_quarantine[quarantine_tail]); - if (global_quarantine[quarantine_tail].bytes >= -@@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) - if (new_tail != quarantine_head) - quarantine_tail = new_tail; - } -- spin_unlock(&quarantine_lock); -+ raw_spin_unlock(&quarantine_lock); - } - - local_irq_restore(flags); -@@ -230,7 +230,7 @@ void quarantine_reduce(void) - * expected case). - */ - srcu_idx = srcu_read_lock(&remove_cache_srcu); -- spin_lock_irqsave(&quarantine_lock, flags); -+ raw_spin_lock_irqsave(&quarantine_lock, flags); - - /* - * Update quarantine size in case of hotplug. Allocate a fraction of -@@ -254,7 +254,7 @@ void quarantine_reduce(void) - quarantine_head = 0; - } - -- spin_unlock_irqrestore(&quarantine_lock, flags); -+ raw_spin_unlock_irqrestore(&quarantine_lock, flags); - - qlist_free_all(&to_free, NULL); - srcu_read_unlock(&remove_cache_srcu, srcu_idx); -@@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem_cache *cache) - */ - on_each_cpu(per_cpu_remove_cache, cache, 1); - -- spin_lock_irqsave(&quarantine_lock, flags); -+ raw_spin_lock_irqsave(&quarantine_lock, flags); - for (i = 0; i < QUARANTINE_BATCHES; i++) { - if (qlist_empty(&global_quarantine[i])) - continue; - qlist_move_cache(&global_quarantine[i], &to_free, cache); - /* Scanning whole quarantine can take a while. 
*/ -- spin_unlock_irqrestore(&quarantine_lock, flags); -+ raw_spin_unlock_irqrestore(&quarantine_lock, flags); - cond_resched(); -- spin_lock_irqsave(&quarantine_lock, flags); -+ raw_spin_lock_irqsave(&quarantine_lock, flags); - } -- spin_unlock_irqrestore(&quarantine_lock, flags); -+ raw_spin_unlock_irqrestore(&quarantine_lock, flags); - - qlist_free_all(&to_free, cache); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch b/kernel/patches-4.19.x-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch deleted file mode 100644 index 3b418ce06..000000000 --- a/kernel/patches-4.19.x-rt/0023-EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch +++ /dev/null @@ -1,50 +0,0 @@ -From c7753a6fd996fcaa0285c1c8285fde721d519a0a Mon Sep 17 00:00:00 2001 -From: "Paul E. McKenney" -Date: Mon, 29 Oct 2018 11:53:01 +0100 -Subject: [PATCH 023/328] EXP rcu: Revert expedited GP parallelization - cleverness - -(Commit 258ba8e089db23f760139266c232f01bad73f85c from linux-rcu) - -This commit reverts a series of commits starting with fcc635436501 ("rcu: -Make expedited GPs handle CPU 0 being offline") and its successors, thus -queueing each rcu_node structure's expedited grace-period initialization -work on the first CPU of that rcu_node structure. - -Suggested-by: Sebastian Andrzej Siewior -Signed-off-by: Paul E. McKenney -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/tree_exp.h | 9 +-------- - 1 file changed, 1 insertion(+), 8 deletions(-) - -diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h -index 0b2c2ad69629..a0486414edb4 100644 ---- a/kernel/rcu/tree_exp.h -+++ b/kernel/rcu/tree_exp.h -@@ -472,7 +472,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) - static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, - smp_call_func_t func) - { -- int cpu; - struct rcu_node *rnp; - - trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset")); -@@ -494,13 +493,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, - continue; - } - INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); -- preempt_disable(); -- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask); -- /* If all offline, queue the work on an unbound CPU. */ -- if (unlikely(cpu > rnp->grphi)) -- cpu = WORK_CPU_UNBOUND; -- queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); -- preempt_enable(); -+ queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work); - rnp->exp_need_flush = true; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch b/kernel/patches-4.19.x-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch deleted file mode 100644 index 2c2317515..000000000 --- a/kernel/patches-4.19.x-rt/0024-kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch +++ /dev/null @@ -1,168 +0,0 @@ -From 4b0c7eda4403c5a7146714857bd1abffd2b080f8 Mon Sep 17 00:00:00 2001 -From: He Zhe -Date: Wed, 19 Dec 2018 16:30:57 +0100 -Subject: [PATCH 024/328] kmemleak: Turn kmemleak_lock to raw spinlock on RT -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -kmemleak_lock, as a rwlock on RT, can possibly be held in atomic context and -causes the follow BUG. 
- -BUG: scheduling while atomic: migration/15/132/0x00000002 -Preemption disabled at: -[] cpu_stopper_thread+0x71/0x100 -CPU: 15 PID: 132 Comm: migration/15 Not tainted 4.19.0-rt1-preempt-rt #1 -Call Trace: - schedule+0x3d/0xe0 - __rt_spin_lock+0x26/0x30 - __write_rt_lock+0x23/0x1a0 - rt_write_lock+0x2a/0x30 - find_and_remove_object+0x1e/0x80 - delete_object_full+0x10/0x20 - kmemleak_free+0x32/0x50 - kfree+0x104/0x1f0 - intel_pmu_cpu_dying+0x67/0x70 - x86_pmu_dying_cpu+0x1a/0x30 - cpuhp_invoke_callback+0x92/0x700 - take_cpu_down+0x70/0xa0 - multi_cpu_stop+0x62/0xc0 - cpu_stopper_thread+0x79/0x100 - smpboot_thread_fn+0x20f/0x2d0 - kthread+0x121/0x140 - -And on v4.18 stable tree the following call trace, caused by grabbing -kmemleak_lock again, is also observed. - -kernel BUG at kernel/locking/rtmutex.c:1048! -CPU: 5 PID: 689 Comm: mkfs.ext4 Not tainted 4.18.16-rt9-preempt-rt #1 -Call Trace: - rt_write_lock+0x2a/0x30 - create_object+0x17d/0x2b0 - kmemleak_alloc+0x34/0x50 - kmem_cache_alloc+0x146/0x220 - mempool_alloc_slab+0x15/0x20 - mempool_alloc+0x65/0x170 - sg_pool_alloc+0x21/0x60 - sg_alloc_table_chained+0x8b/0xb0 -… - blk_flush_plug_list+0x204/0x230 - schedule+0x87/0xe0 - rt_write_lock+0x2a/0x30 - create_object+0x17d/0x2b0 - kmemleak_alloc+0x34/0x50 - __kmalloc_node+0x1cd/0x340 - alloc_request_size+0x30/0x70 - mempool_alloc+0x65/0x170 - get_request+0x4e3/0x8d0 - blk_queue_bio+0x153/0x470 - generic_make_request+0x1dc/0x3f0 - submit_bio+0x49/0x140 -… - -kmemleak is an error detecting feature. We would not expect as good performance -as without it. As there is no raw rwlock defining helpers, we turn kmemleak_lock -to a raw spinlock. - -Signed-off-by: He Zhe -Cc: catalin.marinas@arm.com -Cc: bigeasy@linutronix.de -Cc: tglx@linutronix.de -Cc: rostedt@goodmis.org -Acked-by: Catalin Marinas -Link: https://lkml.kernel.org/r/1542877459-144382-1-git-send-email-zhe.he@windriver.com -Link: https://lkml.kernel.org/r/20181218150744.GB20197@arrakis.emea.arm.com -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/kmemleak.c | 20 ++++++++++---------- - 1 file changed, 10 insertions(+), 10 deletions(-) - -diff --git a/mm/kmemleak.c b/mm/kmemleak.c -index 5eeabece0c17..92ce99b15f2b 100644 ---- a/mm/kmemleak.c -+++ b/mm/kmemleak.c -@@ -26,7 +26,7 @@ - * - * The following locks and mutexes are used by kmemleak: - * -- * - kmemleak_lock (rwlock): protects the object_list modifications and -+ * - kmemleak_lock (raw spinlock): protects the object_list modifications and - * accesses to the object_tree_root. The object_list is the main list - * holding the metadata (struct kmemleak_object) for the allocated memory - * blocks. 
The object_tree_root is a red black tree used to look-up -@@ -197,7 +197,7 @@ static LIST_HEAD(gray_list); - /* search tree for object boundaries */ - static struct rb_root object_tree_root = RB_ROOT; - /* rw_lock protecting the access to object_list and object_tree_root */ --static DEFINE_RWLOCK(kmemleak_lock); -+static DEFINE_RAW_SPINLOCK(kmemleak_lock); - - /* allocation caches for kmemleak internal data */ - static struct kmem_cache *object_cache; -@@ -491,9 +491,9 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias) - struct kmemleak_object *object; - - rcu_read_lock(); -- read_lock_irqsave(&kmemleak_lock, flags); -+ raw_spin_lock_irqsave(&kmemleak_lock, flags); - object = lookup_object(ptr, alias); -- read_unlock_irqrestore(&kmemleak_lock, flags); -+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags); - - /* check whether the object is still available */ - if (object && !get_object(object)) -@@ -513,13 +513,13 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali - unsigned long flags; - struct kmemleak_object *object; - -- write_lock_irqsave(&kmemleak_lock, flags); -+ raw_spin_lock_irqsave(&kmemleak_lock, flags); - object = lookup_object(ptr, alias); - if (object) { - rb_erase(&object->rb_node, &object_tree_root); - list_del_rcu(&object->object_list); - } -- write_unlock_irqrestore(&kmemleak_lock, flags); -+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags); - - return object; - } -@@ -593,7 +593,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, - /* kernel backtrace */ - object->trace_len = __save_stack_trace(object->trace); - -- write_lock_irqsave(&kmemleak_lock, flags); -+ raw_spin_lock_irqsave(&kmemleak_lock, flags); - - min_addr = min(min_addr, ptr); - max_addr = max(max_addr, ptr + size); -@@ -624,7 +624,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, - - list_add_tail_rcu(&object->object_list, &object_list); - out: -- write_unlock_irqrestore(&kmemleak_lock, flags); -+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags); - return object; - } - -@@ -1310,7 +1310,7 @@ static void scan_block(void *_start, void *_end, - unsigned long *end = _end - (BYTES_PER_POINTER - 1); - unsigned long flags; - -- read_lock_irqsave(&kmemleak_lock, flags); -+ raw_spin_lock_irqsave(&kmemleak_lock, flags); - for (ptr = start; ptr < end; ptr++) { - struct kmemleak_object *object; - unsigned long pointer; -@@ -1367,7 +1367,7 @@ static void scan_block(void *_start, void *_end, - spin_unlock(&object->lock); - } - } -- read_unlock_irqrestore(&kmemleak_lock, flags); -+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags); - } - - /* --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/kernel/patches-4.19.x-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch deleted file mode 100644 index 9a58a6b2d..000000000 --- a/kernel/patches-4.19.x-rt/0025-NFSv4-replace-seqcount_t-with-a-seqlock_t.patch +++ /dev/null @@ -1,135 +0,0 @@ -From 7cb617c6dac1356dfe57b1c4a976ec78ead046a0 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 28 Oct 2016 23:05:11 +0200 -Subject: [PATCH 025/328] NFSv4: replace seqcount_t with a seqlock_t - -The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me -because it maps to preempt_disable() in -RT which I can't have at this -point. So I took a look at the code. 
-It the lockdep part was removed in commit abbec2da13f0 ("NFS: Use -raw_write_seqcount_begin/end int nfs4_reclaim_open_state") because -lockdep complained. The whole seqcount thing was introduced in commit -c137afabe330 ("NFSv4: Allow the state manager to mark an open_owner as -being recovered"). -The recovery threads runs only once. -write_seqlock() does not work on !RT because it disables preemption and it the -writer side is preemptible (has to remain so despite the fact that it will -block readers). - -Reported-by: kernel test robot -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/nfs/delegation.c | 4 ++-- - fs/nfs/nfs4_fs.h | 2 +- - fs/nfs/nfs4proc.c | 4 ++-- - fs/nfs/nfs4state.c | 22 ++++++++++++++++------ - 4 files changed, 21 insertions(+), 11 deletions(-) - -diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c -index b0c0c2fc2fba..26565ba05dc1 100644 ---- a/fs/nfs/delegation.c -+++ b/fs/nfs/delegation.c -@@ -162,11 +162,11 @@ static int nfs_delegation_claim_opens(struct inode *inode, - sp = state->owner; - /* Block nfs4_proc_unlck */ - mutex_lock(&sp->so_delegreturn_mutex); -- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); -+ seq = read_seqbegin(&sp->so_reclaim_seqlock); - err = nfs4_open_delegation_recall(ctx, state, stateid); - if (!err) - err = nfs_delegation_claim_locks(state, stateid); -- if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) -+ if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq)) - err = -EAGAIN; - mutex_unlock(&sp->so_delegreturn_mutex); - put_nfs_open_context(ctx); -diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h -index 5b61520dce88..2771aafaca19 100644 ---- a/fs/nfs/nfs4_fs.h -+++ b/fs/nfs/nfs4_fs.h -@@ -114,7 +114,7 @@ struct nfs4_state_owner { - unsigned long so_flags; - struct list_head so_states; - struct nfs_seqid_counter so_seqid; -- seqcount_t so_reclaim_seqcount; -+ seqlock_t so_reclaim_seqlock; - struct mutex so_delegreturn_mutex; - }; - -diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c -index 668b648064b7..187d411668ed 100644 ---- a/fs/nfs/nfs4proc.c -+++ b/fs/nfs/nfs4proc.c -@@ -2870,7 +2870,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, - unsigned int seq; - int ret; - -- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); -+ seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount); - - ret = _nfs4_proc_open(opendata, ctx); - if (ret != 0) -@@ -2911,7 +2911,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, - - if (d_inode(dentry) == state->inode) { - nfs_inode_attach_open_context(ctx); -- if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) -+ if (read_seqretry(&sp->so_reclaim_seqlock, seq)) - nfs4_schedule_stateid_recovery(server, state); - } - -diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c -index b3086e99420c..c9bf1eb7e1b2 100644 ---- a/fs/nfs/nfs4state.c -+++ b/fs/nfs/nfs4state.c -@@ -515,7 +515,7 @@ nfs4_alloc_state_owner(struct nfs_server *server, - nfs4_init_seqid_counter(&sp->so_seqid); - atomic_set(&sp->so_count, 1); - INIT_LIST_HEAD(&sp->so_lru); -- seqcount_init(&sp->so_reclaim_seqcount); -+ seqlock_init(&sp->so_reclaim_seqlock); - mutex_init(&sp->so_delegreturn_mutex); - return sp; - } -@@ -1583,8 +1583,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs - * recovering after a network partition or a reboot from a - * server that doesn't support a grace period. 
- */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ write_seqlock(&sp->so_reclaim_seqlock); -+#else -+ write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount); -+#endif - spin_lock(&sp->so_lock); -- raw_write_seqcount_begin(&sp->so_reclaim_seqcount); - restart: - list_for_each_entry(state, &sp->so_states, open_states) { - if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) -@@ -1671,14 +1675,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs - spin_lock(&sp->so_lock); - goto restart; - } -- raw_write_seqcount_end(&sp->so_reclaim_seqcount); - spin_unlock(&sp->so_lock); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ write_sequnlock(&sp->so_reclaim_seqlock); -+#else -+ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount); -+#endif - return 0; - out_err: - nfs4_put_open_state(state); -- spin_lock(&sp->so_lock); -- raw_write_seqcount_end(&sp->so_reclaim_seqcount); -- spin_unlock(&sp->so_lock); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ write_sequnlock(&sp->so_reclaim_seqlock); -+#else -+ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount); -+#endif - return status; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/kernel/patches-4.19.x-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch deleted file mode 100644 index a3763e42c..000000000 --- a/kernel/patches-4.19.x-rt/0026-kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch +++ /dev/null @@ -1,784 +0,0 @@ -From 4906d6c574d916416e92a9de0b959c4d0ed0bc17 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 4 Apr 2017 12:50:16 +0200 -Subject: [PATCH 026/328] kernel: sched: Provide a pointer to the valid CPU - mask -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed() -wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not -much difference in !RT but in RT we used this to implement -migrate_disable(). Within a migrate_disable() section the CPU mask is -restricted to single CPU while the "normal" CPU mask remains untouched. - -As an alternative implementation Ingo suggested to use - struct task_struct { - const cpumask_t *cpus_ptr; - cpumask_t cpus_mask; - }; -with - t->cpus_allowed_ptr = &t->cpus_allowed; - -In -RT we then can switch the cpus_ptr to - t->cpus_allowed_ptr = &cpumask_of(task_cpu(p)); - -in a migration disabled region. The rules are simple: -- Code that 'uses' ->cpus_allowed would use the pointer. -- Code that 'modifies' ->cpus_allowed would use the direct mask. - -While converting the existing users I tried to stick with the rules -above however… well mostly CPUFREQ tries to temporary switch the CPU -mask to do something on a certain CPU and then switches the mask back it -its original value. So in theory `cpus_ptr' could or should be used. -However if this is invoked in a migration disabled region (which is not -the case because it would require something like preempt_disable() and -set_cpus_allowed_ptr() might sleep so it can't be) then the "restore" -part would restore the wrong mask. So it only looks strange and I go for -the pointer… - -Some drivers copy the cpumask without cpumask_copy() and others use -cpumask_copy but without alloc_cpumask_var(). I did not fix those as -part of this, could do this as a follow up… - -So is this the way we want it? -Is the usage of `cpus_ptr' vs `cpus_mask' for the set + restore part -(see cpufreq users) what we want? 
At some point it looks like they -should use a different interface for their doing. I am not sure why -switching to certain CPU is important but maybe it could be done via a -workqueue from the CPUFREQ core (so we have a comment desribing why are -doing this and a get_online_cpus() to ensure that the CPU does not go -offline too early). - -Cc: Peter Zijlstra -Cc: Thomas Gleixner -Cc: Mike Galbraith -Cc: Ingo Molnar -Cc: Rafael J. Wysocki -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/ia64/kernel/mca.c | 2 +- - arch/mips/include/asm/switch_to.h | 4 +-- - arch/mips/kernel/mips-mt-fpaff.c | 2 +- - arch/mips/kernel/traps.c | 6 ++-- - arch/powerpc/platforms/cell/spufs/sched.c | 2 +- - arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c | 2 +- - drivers/infiniband/hw/hfi1/affinity.c | 6 ++-- - drivers/infiniband/hw/hfi1/sdma.c | 3 +- - drivers/infiniband/hw/qib/qib_file_ops.c | 7 ++-- - fs/proc/array.c | 4 +-- - include/linux/sched.h | 5 +-- - init/init_task.c | 3 +- - kernel/cgroup/cpuset.c | 2 +- - kernel/fork.c | 2 ++ - kernel/sched/core.c | 40 ++++++++++----------- - kernel/sched/cpudeadline.c | 4 +-- - kernel/sched/cpupri.c | 4 +-- - kernel/sched/deadline.c | 6 ++-- - kernel/sched/fair.c | 32 ++++++++--------- - kernel/sched/rt.c | 4 +-- - kernel/trace/trace_hwlat.c | 2 +- - lib/smp_processor_id.c | 2 +- - samples/trace_events/trace-events-sample.c | 2 +- - 23 files changed, 74 insertions(+), 72 deletions(-) - -diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c -index 6115464d5f03..f09e34c8409c 100644 ---- a/arch/ia64/kernel/mca.c -+++ b/arch/ia64/kernel/mca.c -@@ -1824,7 +1824,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, - ti->cpu = cpu; - p->stack = ti; - p->state = TASK_UNINTERRUPTIBLE; -- cpumask_set_cpu(cpu, &p->cpus_allowed); -+ cpumask_set_cpu(cpu, &p->cpus_mask); - INIT_LIST_HEAD(&p->tasks); - p->parent = p->real_parent = p->group_leader = p; - INIT_LIST_HEAD(&p->children); -diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h -index e610473d61b8..1428b4febbc9 100644 ---- a/arch/mips/include/asm/switch_to.h -+++ b/arch/mips/include/asm/switch_to.h -@@ -42,7 +42,7 @@ extern struct task_struct *ll_task; - * inline to try to keep the overhead down. If we have been forced to run on - * a "CPU" with an FPU because of a previous high level of FP computation, - * but did not actually use the FPU during the most recent time-slice (CU1 -- * isn't set), we undo the restriction on cpus_allowed. -+ * isn't set), we undo the restriction on cpus_mask. 
- * - * We're not calling set_cpus_allowed() here, because we have no need to - * force prompt migration - we're already switching the current CPU to a -@@ -57,7 +57,7 @@ do { \ - test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \ - (!(KSTK_STATUS(prev) & ST0_CU1))) { \ - clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \ -- prev->cpus_allowed = prev->thread.user_cpus_allowed; \ -+ prev->cpus_mask = prev->thread.user_cpus_allowed; \ - } \ - next->thread.emulated_fp = 0; \ - } while(0) -diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c -index a7c0f97e4b0d..1a08428eedcf 100644 ---- a/arch/mips/kernel/mips-mt-fpaff.c -+++ b/arch/mips/kernel/mips-mt-fpaff.c -@@ -177,7 +177,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, - if (retval) - goto out_unlock; - -- cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed); -+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr); - cpumask_and(&mask, &allowed, cpu_active_mask); - - out_unlock: -diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c -index 9dab0ed1b227..3623cf32f5f4 100644 ---- a/arch/mips/kernel/traps.c -+++ b/arch/mips/kernel/traps.c -@@ -1174,12 +1174,12 @@ static void mt_ase_fp_affinity(void) - * restricted the allowed set to exclude any CPUs with FPUs, - * we'll skip the procedure. - */ -- if (cpumask_intersects(¤t->cpus_allowed, &mt_fpu_cpumask)) { -+ if (cpumask_intersects(¤t->cpus_mask, &mt_fpu_cpumask)) { - cpumask_t tmask; - - current->thread.user_cpus_allowed -- = current->cpus_allowed; -- cpumask_and(&tmask, ¤t->cpus_allowed, -+ = current->cpus_mask; -+ cpumask_and(&tmask, ¤t->cpus_mask, - &mt_fpu_cpumask); - set_cpus_allowed_ptr(current, &tmask); - set_thread_flag(TIF_FPUBOUND); -diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c -index c9ef3c532169..cb10249b1125 100644 ---- a/arch/powerpc/platforms/cell/spufs/sched.c -+++ b/arch/powerpc/platforms/cell/spufs/sched.c -@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx) - * runqueue. The context will be rescheduled on the proper node - * if it is timesliced or preempted. - */ -- cpumask_copy(&ctx->cpus_allowed, ¤t->cpus_allowed); -+ cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr); - - /* Save the current cpu id for spu interrupt routing. */ - ctx->last_ran = raw_smp_processor_id(); -diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c -index a999a58ca331..d6410d0740ea 100644 ---- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c -+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c -@@ -1445,7 +1445,7 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) - * may be scheduled elsewhere and invalidate entries in the - * pseudo-locked region. 
- */ -- if (!cpumask_subset(¤t->cpus_allowed, &plr->d->cpu_mask)) { -+ if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { - mutex_unlock(&rdtgroup_mutex); - return -EINVAL; - } -diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c -index 01ed0a667928..2c62de6b5bf1 100644 ---- a/drivers/infiniband/hw/hfi1/affinity.c -+++ b/drivers/infiniband/hw/hfi1/affinity.c -@@ -1039,7 +1039,7 @@ int hfi1_get_proc_affinity(int node) - struct hfi1_affinity_node *entry; - cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask; - const struct cpumask *node_mask, -- *proc_mask = ¤t->cpus_allowed; -+ *proc_mask = current->cpus_ptr; - struct hfi1_affinity_node_list *affinity = &node_affinity; - struct cpu_mask_set *set = &affinity->proc; - -@@ -1047,7 +1047,7 @@ int hfi1_get_proc_affinity(int node) - * check whether process/context affinity has already - * been set - */ -- if (cpumask_weight(proc_mask) == 1) { -+ if (current->nr_cpus_allowed == 1) { - hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", - current->pid, current->comm, - cpumask_pr_args(proc_mask)); -@@ -1058,7 +1058,7 @@ int hfi1_get_proc_affinity(int node) - cpu = cpumask_first(proc_mask); - cpumask_set_cpu(cpu, &set->used); - goto done; -- } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { -+ } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) { - hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", - current->pid, current->comm, - cpumask_pr_args(proc_mask)); -diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c -index 291c12f588b5..05e7b28a03c1 100644 ---- a/drivers/infiniband/hw/hfi1/sdma.c -+++ b/drivers/infiniband/hw/hfi1/sdma.c -@@ -853,14 +853,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, - { - struct sdma_rht_node *rht_node; - struct sdma_engine *sde = NULL; -- const struct cpumask *current_mask = ¤t->cpus_allowed; - unsigned long cpu_id; - - /* - * To ensure that always the same sdma engine(s) will be - * selected make sure the process is pinned to this CPU only. 
- */ -- if (cpumask_weight(current_mask) != 1) -+ if (current->nr_cpus_allowed != 1) - goto out; - - cpu_id = smp_processor_id(); -diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c -index 98e1ce14fa2a..5d3828625017 100644 ---- a/drivers/infiniband/hw/qib/qib_file_ops.c -+++ b/drivers/infiniband/hw/qib/qib_file_ops.c -@@ -1142,7 +1142,7 @@ static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt) - static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd) - { - struct qib_filedata *fd = fp->private_data; -- const unsigned int weight = cpumask_weight(¤t->cpus_allowed); -+ const unsigned int weight = current->nr_cpus_allowed; - const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus); - int local_cpu; - -@@ -1623,9 +1623,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) - ret = find_free_ctxt(i_minor - 1, fp, uinfo); - else { - int unit; -- const unsigned int cpu = cpumask_first(¤t->cpus_allowed); -- const unsigned int weight = -- cpumask_weight(¤t->cpus_allowed); -+ const unsigned int cpu = cpumask_first(current->cpus_ptr); -+ const unsigned int weight = current->nr_cpus_allowed; - - if (weight == 1 && !test_bit(cpu, qib_cpulist)) - if (!find_hca(cpu, &unit) && unit >= 0) -diff --git a/fs/proc/array.c b/fs/proc/array.c -index 9eb99a43f849..e4d0cfebaac5 100644 ---- a/fs/proc/array.c -+++ b/fs/proc/array.c -@@ -381,9 +381,9 @@ static inline void task_context_switch_counts(struct seq_file *m, - static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) - { - seq_printf(m, "Cpus_allowed:\t%*pb\n", -- cpumask_pr_args(&task->cpus_allowed)); -+ cpumask_pr_args(task->cpus_ptr)); - seq_printf(m, "Cpus_allowed_list:\t%*pbl\n", -- cpumask_pr_args(&task->cpus_allowed)); -+ cpumask_pr_args(task->cpus_ptr)); - } - - static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm) -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 0530de9a4efc..4298a87b9de6 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -660,7 +660,8 @@ struct task_struct { - - unsigned int policy; - int nr_cpus_allowed; -- cpumask_t cpus_allowed; -+ const cpumask_t *cpus_ptr; -+ cpumask_t cpus_mask; - - #ifdef CONFIG_PREEMPT_RCU - int rcu_read_lock_nesting; -@@ -1398,7 +1399,7 @@ extern struct pid *cad_pid; - #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ - #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ - #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ --#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ -+#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ - #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ - #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ - #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ -diff --git a/init/init_task.c b/init/init_task.c -index 5aebe3be4d7c..0b49b9cf5571 100644 ---- a/init/init_task.c -+++ b/init/init_task.c -@@ -71,7 +71,8 @@ struct task_struct init_task - .static_prio = MAX_PRIO - 20, - .normal_prio = MAX_PRIO - 20, - .policy = SCHED_NORMAL, -- .cpus_allowed = CPU_MASK_ALL, -+ .cpus_ptr = &init_task.cpus_mask, -+ .cpus_mask = CPU_MASK_ALL, - .nr_cpus_allowed= NR_CPUS, - .mm = NULL, - .active_mm = &init_mm, -diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c -index 
ff956ccbb6df..7bb129c5b412 100644 ---- a/kernel/cgroup/cpuset.c -+++ b/kernel/cgroup/cpuset.c -@@ -2090,7 +2090,7 @@ static void cpuset_fork(struct task_struct *task) - if (task_css_is_root(task, cpuset_cgrp_id)) - return; - -- set_cpus_allowed_ptr(task, ¤t->cpus_allowed); -+ set_cpus_allowed_ptr(task, current->cpus_ptr); - task->mems_allowed = current->mems_allowed; - } - -diff --git a/kernel/fork.c b/kernel/fork.c -index 1a2d18e98bf9..bc182d6fa2a9 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -850,6 +850,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) - #ifdef CONFIG_STACKPROTECTOR - tsk->stack_canary = get_random_canary(); - #endif -+ if (orig->cpus_ptr == &orig->cpus_mask) -+ tsk->cpus_ptr = &tsk->cpus_mask; - - /* - * One for us, one for whoever does the "release_task()" (usually -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 2befd2c4ce9e..07dc66137a26 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -878,7 +878,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) - */ - static inline bool is_cpu_allowed(struct task_struct *p, int cpu) - { -- if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) -+ if (!cpumask_test_cpu(cpu, p->cpus_ptr)) - return false; - - if (is_per_cpu_kthread(p)) -@@ -973,7 +973,7 @@ static int migration_cpu_stop(void *data) - local_irq_disable(); - /* - * We need to explicitly wake pending tasks before running -- * __migrate_task() such that we will not miss enforcing cpus_allowed -+ * __migrate_task() such that we will not miss enforcing cpus_ptr - * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. - */ - sched_ttwu_pending(); -@@ -1004,7 +1004,7 @@ static int migration_cpu_stop(void *data) - */ - void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) - { -- cpumask_copy(&p->cpus_allowed, new_mask); -+ cpumask_copy(&p->cpus_mask, new_mask); - p->nr_cpus_allowed = cpumask_weight(new_mask); - } - -@@ -1074,7 +1074,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, - goto out; - } - -- if (cpumask_equal(&p->cpus_allowed, new_mask)) -+ if (cpumask_equal(p->cpus_ptr, new_mask)) - goto out; - - dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); -@@ -1237,10 +1237,10 @@ static int migrate_swap_stop(void *data) - if (task_cpu(arg->src_task) != arg->src_cpu) - goto unlock; - -- if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed)) -+ if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) - goto unlock; - -- if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed)) -+ if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) - goto unlock; - - __migrate_swap_task(arg->src_task, arg->dst_cpu); -@@ -1282,10 +1282,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, - if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) - goto out; - -- if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed)) -+ if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) - goto out; - -- if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed)) -+ if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) - goto out; - - trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); -@@ -1430,7 +1430,7 @@ void kick_process(struct task_struct *p) - EXPORT_SYMBOL_GPL(kick_process); - - /* -- * ->cpus_allowed is protected by both rq->lock and p->pi_lock -+ * ->cpus_ptr is protected by both rq->lock and p->pi_lock - * - * A few notes on cpu_active vs cpu_online: - * -@@ -1470,14 +1470,14 @@ 
static int select_fallback_rq(int cpu, struct task_struct *p) - for_each_cpu(dest_cpu, nodemask) { - if (!cpu_active(dest_cpu)) - continue; -- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) -+ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) - return dest_cpu; - } - } - - for (;;) { - /* Any allowed, online CPU? */ -- for_each_cpu(dest_cpu, &p->cpus_allowed) { -+ for_each_cpu(dest_cpu, p->cpus_ptr) { - if (!is_cpu_allowed(p, dest_cpu)) - continue; - -@@ -1521,7 +1521,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p) - } - - /* -- * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable. -+ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. - */ - static inline - int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) -@@ -1531,11 +1531,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) - if (p->nr_cpus_allowed > 1) - cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); - else -- cpu = cpumask_any(&p->cpus_allowed); -+ cpu = cpumask_any(p->cpus_ptr); - - /* - * In order not to call set_task_cpu() on a blocking task we need -- * to rely on ttwu() to place the task on a valid ->cpus_allowed -+ * to rely on ttwu() to place the task on a valid ->cpus_ptr - * CPU. - * - * Since this is common to all placement strategies, this lives here. -@@ -2402,7 +2402,7 @@ void wake_up_new_task(struct task_struct *p) - #ifdef CONFIG_SMP - /* - * Fork balancing, do it here and not earlier because: -- * - cpus_allowed can change in the fork path -+ * - cpus_ptr can change in the fork path - * - any previously selected CPU might disappear through hotplug - * - * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, -@@ -4316,7 +4316,7 @@ static int __sched_setscheduler(struct task_struct *p, - * the entire root_domain to become SCHED_DEADLINE. We - * will also fail if there's no bandwidth available. - */ -- if (!cpumask_subset(span, &p->cpus_allowed) || -+ if (!cpumask_subset(span, p->cpus_ptr) || - rq->rd->dl_bw.bw == 0) { - task_rq_unlock(rq, p, &rf); - return -EPERM; -@@ -4915,7 +4915,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask) - goto out_unlock; - - raw_spin_lock_irqsave(&p->pi_lock, flags); -- cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); -+ cpumask_and(mask, &p->cpus_mask, cpu_active_mask); - raw_spin_unlock_irqrestore(&p->pi_lock, flags); - - out_unlock: -@@ -5496,7 +5496,7 @@ int task_can_attach(struct task_struct *p, - * allowed nodes is unnecessary. Thus, cpusets are not - * applicable for such threads. This prevents checking for - * success of set_cpus_allowed_ptr() on all attached tasks -- * before cpus_allowed may be changed. -+ * before cpus_mask may be changed. - */ - if (p->flags & PF_NO_SETAFFINITY) { - ret = -EINVAL; -@@ -5523,7 +5523,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu) - if (curr_cpu == target_cpu) - return 0; - -- if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed)) -+ if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) - return -EINVAL; - - /* TODO: This is not properly updating schedstats */ -@@ -5661,7 +5661,7 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) - put_prev_task(rq, next); - - /* -- * Rules for changing task_struct::cpus_allowed are holding -+ * Rules for changing task_struct::cpus_mask are holding - * both pi_lock and rq->lock, such that holding either - * stabilizes the mask. 
- * -diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c -index 50316455ea66..d57fb2f8ae67 100644 ---- a/kernel/sched/cpudeadline.c -+++ b/kernel/sched/cpudeadline.c -@@ -124,14 +124,14 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, - const struct sched_dl_entity *dl_se = &p->dl; - - if (later_mask && -- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { -+ cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) { - return 1; - } else { - int best_cpu = cpudl_maximum(cp); - - WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); - -- if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) && -+ if (cpumask_test_cpu(best_cpu, p->cpus_ptr) && - dl_time_before(dl_se->deadline, cp->elements[0].dl)) { - if (later_mask) - cpumask_set_cpu(best_cpu, later_mask); -diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c -index daaadf939ccb..f7d2c10b4c92 100644 ---- a/kernel/sched/cpupri.c -+++ b/kernel/sched/cpupri.c -@@ -98,11 +98,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, - if (skip) - continue; - -- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) -+ if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids) - continue; - - if (lowest_mask) { -- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); -+ cpumask_and(lowest_mask, p->cpus_ptr, vec->mask); - - /* - * We have to ensure that we have at least one bit -diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index ebec37cb3be9..4b13df38c069 100644 ---- a/kernel/sched/deadline.c -+++ b/kernel/sched/deadline.c -@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p - * If we cannot preempt any rq, fall back to pick any - * online CPU: - */ -- cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); -+ cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); - if (cpu >= nr_cpu_ids) { - /* - * Failed to find any suitable CPU. -@@ -1856,7 +1856,7 @@ static void set_curr_task_dl(struct rq *rq) - static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) - { - if (!task_running(rq, p) && -- cpumask_test_cpu(cpu, &p->cpus_allowed)) -+ cpumask_test_cpu(cpu, p->cpus_ptr)) - return 1; - return 0; - } -@@ -2006,7 +2006,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) - /* Retry if something changed. */ - if (double_lock_balance(rq, later_rq)) { - if (unlikely(task_rq(task) != rq || -- !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) || -+ !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) || - task_running(rq, task) || - !dl_task(task) || - !task_on_rq_queued(task))) { -diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 0f1ba3d72336..27f9f9a785c1 100644 ---- a/kernel/sched/fair.c -+++ b/kernel/sched/fair.c -@@ -1678,7 +1678,7 @@ static void task_numa_compare(struct task_numa_env *env, - * be incurred if the tasks were swapped. 
- */ - /* Skip this swap candidate if cannot move to the source cpu */ -- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) -+ if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) - goto unlock; - - /* -@@ -1776,7 +1776,7 @@ static void task_numa_find_cpu(struct task_numa_env *env, - - for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { - /* Skip this CPU if the source task cannot migrate */ -- if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed)) -+ if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) - continue; - - env->dst_cpu = cpu; -@@ -5782,7 +5782,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, - - /* Skip over this group if it has no CPUs allowed */ - if (!cpumask_intersects(sched_group_span(group), -- &p->cpus_allowed)) -+ p->cpus_ptr)) - continue; - - local_group = cpumask_test_cpu(this_cpu, -@@ -5914,7 +5914,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this - return cpumask_first(sched_group_span(group)); - - /* Traverse only the allowed CPUs */ -- for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) { -+ for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { - if (available_idle_cpu(i)) { - struct rq *rq = cpu_rq(i); - struct cpuidle_state *idle = idle_get_state(rq); -@@ -5954,7 +5954,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p - { - int new_cpu = cpu; - -- if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed)) -+ if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) - return prev_cpu; - - /* -@@ -6071,7 +6071,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int - if (!test_idle_cores(target, false)) - return -1; - -- cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); -+ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); - - for_each_cpu_wrap(core, cpus, target) { - bool idle = true; -@@ -6105,7 +6105,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t - return -1; - - for_each_cpu(cpu, cpu_smt_mask(target)) { -- if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) -+ if (!cpumask_test_cpu(cpu, p->cpus_ptr)) - continue; - if (available_idle_cpu(cpu)) - return cpu; -@@ -6168,7 +6168,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t - for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { - if (!--nr) - return -1; -- if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) -+ if (!cpumask_test_cpu(cpu, p->cpus_ptr)) - continue; - if (available_idle_cpu(cpu)) - break; -@@ -6205,7 +6205,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) - recent_used_cpu != target && - cpus_share_cache(recent_used_cpu, target) && - available_idle_cpu(recent_used_cpu) && -- cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) { -+ cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) { - /* - * Replace recent_used_cpu with prev as it is a potential - * candidate for the next wake: -@@ -6423,7 +6423,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f - if (sd_flag & SD_BALANCE_WAKE) { - record_wakee(p); - want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) -- && cpumask_test_cpu(cpu, &p->cpus_allowed); -+ && cpumask_test_cpu(cpu, p->cpus_ptr); - } - - rcu_read_lock(); -@@ -7162,14 +7162,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) - /* - * We do not migrate tasks that are: - * 1) throttled_lb_pair, or -- * 2) cannot be migrated to this CPU due to cpus_allowed, or -+ * 2) cannot be 
migrated to this CPU due to cpus_ptr, or - * 3) running (obviously), or - * 4) are cache-hot on their current CPU. - */ - if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) - return 0; - -- if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) { -+ if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { - int cpu; - - schedstat_inc(p->se.statistics.nr_failed_migrations_affine); -@@ -7189,7 +7189,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) - - /* Prevent to re-select dst_cpu via env's CPUs: */ - for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { -- if (cpumask_test_cpu(cpu, &p->cpus_allowed)) { -+ if (cpumask_test_cpu(cpu, p->cpus_ptr)) { - env->flags |= LBF_DST_PINNED; - env->new_dst_cpu = cpu; - break; -@@ -7786,7 +7786,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) - - /* - * Group imbalance indicates (and tries to solve) the problem where balancing -- * groups is inadequate due to ->cpus_allowed constraints. -+ * groups is inadequate due to ->cpus_ptr constraints. - * - * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a - * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. -@@ -8401,7 +8401,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) - /* - * If the busiest group is imbalanced the below checks don't - * work because they assume all things are equal, which typically -- * isn't true due to cpus_allowed constraints and the like. -+ * isn't true due to cpus_ptr constraints and the like. - */ - if (busiest->group_type == group_imbalanced) - goto force_balance; -@@ -8797,7 +8797,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, - * if the curr task on busiest CPU can't be - * moved to this_cpu: - */ -- if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { -+ if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { - raw_spin_unlock_irqrestore(&busiest->lock, - flags); - env.flags |= LBF_ALL_PINNED; -diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c -index b980cc96604f..b6ca4a630050 100644 ---- a/kernel/sched/rt.c -+++ b/kernel/sched/rt.c -@@ -1611,7 +1611,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) - static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) - { - if (!task_running(rq, p) && -- cpumask_test_cpu(cpu, &p->cpus_allowed)) -+ cpumask_test_cpu(cpu, p->cpus_ptr)) - return 1; - - return 0; -@@ -1748,7 +1748,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) - * Also make sure that it wasn't scheduled on its rq. - */ - if (unlikely(task_rq(task) != rq || -- !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) || -+ !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) || - task_running(rq, task) || - !rt_task(task) || - !task_on_rq_queued(task))) { -diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c -index 8030e24dbf14..862f4b0139fc 100644 ---- a/kernel/trace/trace_hwlat.c -+++ b/kernel/trace/trace_hwlat.c -@@ -279,7 +279,7 @@ static void move_to_next_cpu(void) - * of this thread, than stop migrating for the duration - * of the current test. 
- */ -- if (!cpumask_equal(current_mask, ¤t->cpus_allowed)) -+ if (!cpumask_equal(current_mask, current->cpus_ptr)) - goto disable; - - get_online_cpus(); -diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c -index 85925aaa4fff..fb35c45b9421 100644 ---- a/lib/smp_processor_id.c -+++ b/lib/smp_processor_id.c -@@ -22,7 +22,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1, - * Kernel threads bound to a single CPU can safely use - * smp_processor_id(): - */ -- if (cpumask_equal(¤t->cpus_allowed, cpumask_of(this_cpu))) -+ if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu))) - goto out; - - /* -diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c -index 5522692100ba..8b4be8e1802a 100644 ---- a/samples/trace_events/trace-events-sample.c -+++ b/samples/trace_events/trace-events-sample.c -@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt) - - /* Silly tracepoints */ - trace_foo_bar("hello", cnt, array, random_strings[len], -- ¤t->cpus_allowed); -+ current->cpus_ptr); - - trace_foo_with_template_simple("HELLO", cnt); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0027-kernel-sched-core-add-migrate_disable.patch b/kernel/patches-4.19.x-rt/0027-kernel-sched-core-add-migrate_disable.patch deleted file mode 100644 index d362bea4a..000000000 --- a/kernel/patches-4.19.x-rt/0027-kernel-sched-core-add-migrate_disable.patch +++ /dev/null @@ -1,265 +0,0 @@ -From 9f9cd889fa22fc1e25802f565f7210b271d136a2 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Sat, 27 May 2017 19:02:06 +0200 -Subject: [PATCH 027/328] kernel/sched/core: add migrate_disable() - ---- - include/linux/preempt.h | 23 +++++++ - include/linux/sched.h | 7 +++ - include/linux/smp.h | 3 + - kernel/sched/core.c | 130 +++++++++++++++++++++++++++++++++++++++- - kernel/sched/debug.c | 4 ++ - 5 files changed, 165 insertions(+), 2 deletions(-) - -diff --git a/include/linux/preempt.h b/include/linux/preempt.h -index c01813c3fbe9..3196d0e76719 100644 ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -185,6 +185,22 @@ do { \ - - #define preemptible() (preempt_count() == 0 && !irqs_disabled()) - -+#ifdef CONFIG_SMP -+ -+extern void migrate_disable(void); -+extern void migrate_enable(void); -+ -+int __migrate_disabled(struct task_struct *p); -+ -+#else -+#define migrate_disable() barrier() -+#define migrate_enable() barrier() -+static inline int __migrate_disabled(struct task_struct *p) -+{ -+ return 0; -+} -+#endif -+ - #ifdef CONFIG_PREEMPT - #define preempt_enable() \ - do { \ -@@ -253,6 +269,13 @@ do { \ - #define preempt_enable_notrace() barrier() - #define preemptible() 0 - -+#define migrate_disable() barrier() -+#define migrate_enable() barrier() -+ -+static inline int __migrate_disabled(struct task_struct *p) -+{ -+ return 0; -+} - #endif /* CONFIG_PREEMPT_COUNT */ - - #ifdef MODULE -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 4298a87b9de6..0489d3e0e78c 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -662,6 +662,13 @@ struct task_struct { - int nr_cpus_allowed; - const cpumask_t *cpus_ptr; - cpumask_t cpus_mask; -+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+ int migrate_disable; -+ int migrate_disable_update; -+# ifdef CONFIG_SCHED_DEBUG -+ int migrate_disable_atomic; -+# endif -+#endif - - #ifdef CONFIG_PREEMPT_RCU - int rcu_read_lock_nesting; -diff --git a/include/linux/smp.h b/include/linux/smp.h -index 9fb239e12b82..5801e516ba63 100644 ---- 
a/include/linux/smp.h -+++ b/include/linux/smp.h -@@ -202,6 +202,9 @@ static inline int get_boot_cpu_id(void) - #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) - #define put_cpu() preempt_enable() - -+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) -+#define put_cpu_light() migrate_enable() -+ - /* - * Callback to arch code if there's nosmp or maxcpus=0 on the - * boot command line: -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 07dc66137a26..d0450f06612c 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1008,7 +1008,15 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma - p->nr_cpus_allowed = cpumask_weight(new_mask); - } - --void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) -+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+int __migrate_disabled(struct task_struct *p) -+{ -+ return p->migrate_disable; -+} -+#endif -+ -+static void __do_set_cpus_allowed_tail(struct task_struct *p, -+ const struct cpumask *new_mask) - { - struct rq *rq = task_rq(p); - bool queued, running; -@@ -1037,6 +1045,20 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) - set_curr_task(rq, p); - } - -+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) -+{ -+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+ if (__migrate_disabled(p)) { -+ lockdep_assert_held(&p->pi_lock); -+ -+ cpumask_copy(&p->cpus_mask, new_mask); -+ p->migrate_disable_update = 1; -+ return; -+ } -+#endif -+ __do_set_cpus_allowed_tail(p, new_mask); -+} -+ - /* - * Change a given task's CPU affinity. Migrate the thread to a - * proper CPU and schedule it away if the CPU it's executing on -@@ -1096,9 +1118,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, - } - - /* Can the task run on the task's current CPU? If so, we're done */ -- if (cpumask_test_cpu(task_cpu(p), new_mask)) -+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) - goto out; - -+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+ if (__migrate_disabled(p)) { -+ p->migrate_disable_update = 1; -+ goto out; -+ } -+#endif -+ - if (task_running(rq, p) || p->state == TASK_WAKING) { - struct migration_arg arg = { p, dest_cpu }; - /* Need help from migration thread: drop lock and wait. 
*/ -@@ -7105,3 +7134,100 @@ const u32 sched_prio_to_wmult[40] = { - }; - - #undef CREATE_TRACE_POINTS -+ -+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+ -+void migrate_disable(void) -+{ -+ struct task_struct *p = current; -+ -+ if (in_atomic() || irqs_disabled()) { -+#ifdef CONFIG_SCHED_DEBUG -+ p->migrate_disable_atomic++; -+#endif -+ return; -+ } -+#ifdef CONFIG_SCHED_DEBUG -+ WARN_ON_ONCE(p->migrate_disable_atomic); -+#endif -+ -+ if (p->migrate_disable) { -+ p->migrate_disable++; -+ return; -+ } -+ -+ preempt_disable(); -+ p->migrate_disable = 1; -+ -+ p->cpus_ptr = cpumask_of(smp_processor_id()); -+ p->nr_cpus_allowed = 1; -+ -+ preempt_enable(); -+} -+EXPORT_SYMBOL(migrate_disable); -+ -+void migrate_enable(void) -+{ -+ struct task_struct *p = current; -+ -+ if (in_atomic() || irqs_disabled()) { -+#ifdef CONFIG_SCHED_DEBUG -+ p->migrate_disable_atomic--; -+#endif -+ return; -+ } -+ -+#ifdef CONFIG_SCHED_DEBUG -+ WARN_ON_ONCE(p->migrate_disable_atomic); -+#endif -+ -+ WARN_ON_ONCE(p->migrate_disable <= 0); -+ if (p->migrate_disable > 1) { -+ p->migrate_disable--; -+ return; -+ } -+ -+ preempt_disable(); -+ -+ p->cpus_ptr = &p->cpus_mask; -+ p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask); -+ p->migrate_disable = 0; -+ -+ if (p->migrate_disable_update) { -+ struct rq *rq; -+ struct rq_flags rf; -+ -+ rq = task_rq_lock(p, &rf); -+ update_rq_clock(rq); -+ -+ __do_set_cpus_allowed_tail(p, &p->cpus_mask); -+ task_rq_unlock(rq, p, &rf); -+ -+ p->migrate_disable_update = 0; -+ -+ WARN_ON(smp_processor_id() != task_cpu(p)); -+ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { -+ const struct cpumask *cpu_valid_mask = cpu_active_mask; -+ struct migration_arg arg; -+ unsigned int dest_cpu; -+ -+ if (p->flags & PF_KTHREAD) { -+ /* -+ * Kernel threads are allowed on online && !active CPUs -+ */ -+ cpu_valid_mask = cpu_online_mask; -+ } -+ dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask); -+ arg.task = p; -+ arg.dest_cpu = dest_cpu; -+ -+ preempt_enable(); -+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); -+ tlb_migrate_finish(p->mm); -+ return; -+ } -+ } -+ preempt_enable(); -+} -+EXPORT_SYMBOL(migrate_enable); -+#endif -diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c -index 78fadf0438ea..5027158d3908 100644 ---- a/kernel/sched/debug.c -+++ b/kernel/sched/debug.c -@@ -982,6 +982,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, - P(dl.runtime); - P(dl.deadline); - } -+#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+ P(migrate_disable); -+#endif -+ P(nr_cpus_allowed); - #undef PN_SCHEDSTAT - #undef PN - #undef __PN --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch b/kernel/patches-4.19.x-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch deleted file mode 100644 index 7ba951819..000000000 --- a/kernel/patches-4.19.x-rt/0028-sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch +++ /dev/null @@ -1,37 +0,0 @@ -From db2220843fd1c19c7b89db5f6e20382b5622fa05 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 9 Oct 2018 17:34:50 +0200 -Subject: [PATCH 028/328] sched/migrate_disable: Add export_symbol_gpl for - __migrate_disabled - -Jonathan reported that lttng/modules can't use __migrate_disabled(). -This function is only used by sched/core itself and the tracing -infrastructure to report the migrate counter (lttng does probably the -same). 
Since the rework migrate_disable() it moved from sched.h to -preempt.h and is became an exported function instead of a "static -inline" due to the header recursion of preempt vs sched. - -Since the compiler inlines the function for sched/core usage, add a -EXPORT_SYMBOL_GPL to allow the module/LTTNG usage. - -Reported-by: Jonathan Rajott -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/core.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index d0450f06612c..e6022cc2605b 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1013,6 +1013,7 @@ int __migrate_disabled(struct task_struct *p) - { - return p->migrate_disable; - } -+EXPORT_SYMBOL_GPL(__migrate_disabled); - #endif - - static void __do_set_cpus_allowed_tail(struct task_struct *p, --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch b/kernel/patches-4.19.x-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch deleted file mode 100644 index 345f756c2..000000000 --- a/kernel/patches-4.19.x-rt/0029-arm-at91-do-not-disable-enable-clocks-in-a-row.patch +++ /dev/null @@ -1,97 +0,0 @@ -From b978b0a313d26ed5e51a9120c8744385a99e541a Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 9 Mar 2016 10:51:06 +0100 -Subject: [PATCH 029/328] arm: at91: do not disable/enable clocks in a row - -Currently the driver will disable the clock and enable it one line later -if it is switching from periodic mode into one shot. -This can be avoided and causes a needless warning on -RT. - -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/clocksource/tcb_clksrc.c | 33 ++++++++++++++++++++++++++++---- - 1 file changed, 29 insertions(+), 4 deletions(-) - -diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c -index 43f4d5c4d6fa..de6baf564dfe 100644 ---- a/drivers/clocksource/tcb_clksrc.c -+++ b/drivers/clocksource/tcb_clksrc.c -@@ -126,6 +126,7 @@ static struct clocksource clksrc = { - struct tc_clkevt_device { - struct clock_event_device clkevt; - struct clk *clk; -+ bool clk_enabled; - void __iomem *regs; - }; - -@@ -143,6 +144,24 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt) - */ - static u32 timer_clock; - -+static void tc_clk_disable(struct clock_event_device *d) -+{ -+ struct tc_clkevt_device *tcd = to_tc_clkevt(d); -+ -+ clk_disable(tcd->clk); -+ tcd->clk_enabled = false; -+} -+ -+static void tc_clk_enable(struct clock_event_device *d) -+{ -+ struct tc_clkevt_device *tcd = to_tc_clkevt(d); -+ -+ if (tcd->clk_enabled) -+ return; -+ clk_enable(tcd->clk); -+ tcd->clk_enabled = true; -+} -+ - static int tc_shutdown(struct clock_event_device *d) - { - struct tc_clkevt_device *tcd = to_tc_clkevt(d); -@@ -150,8 +169,14 @@ static int tc_shutdown(struct clock_event_device *d) - - writel(0xff, regs + ATMEL_TC_REG(2, IDR)); - writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); -+ return 0; -+} -+ -+static int tc_shutdown_clk_off(struct clock_event_device *d) -+{ -+ tc_shutdown(d); - if (!clockevent_state_detached(d)) -- clk_disable(tcd->clk); -+ tc_clk_disable(d); - - return 0; - } -@@ -164,7 +189,7 @@ static int tc_set_oneshot(struct clock_event_device *d) - if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) - tc_shutdown(d); - -- clk_enable(tcd->clk); -+ tc_clk_enable(d); - - /* slow clock, count up to RC, then irq and stop */ - writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | -@@ -186,7 +211,7 @@ static int 
tc_set_periodic(struct clock_event_device *d) - /* By not making the gentime core emulate periodic mode on top - * of oneshot, we get lower overhead and improved accuracy. - */ -- clk_enable(tcd->clk); -+ tc_clk_enable(d); - - /* slow clock, count up to RC, then irq and restart */ - writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, -@@ -220,7 +245,7 @@ static struct tc_clkevt_device clkevt = { - /* Should be lower than at91rm9200's system timer */ - .rating = 125, - .set_next_event = tc_next_event, -- .set_state_shutdown = tc_shutdown, -+ .set_state_shutdown = tc_shutdown_clk_off, - .set_state_periodic = tc_set_periodic, - .set_state_oneshot = tc_set_oneshot, - }, --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch b/kernel/patches-4.19.x-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch deleted file mode 100644 index ad025f020..000000000 --- a/kernel/patches-4.19.x-rt/0030-clocksource-TCLIB-Allow-higher-clock-rates-for-clock.patch +++ /dev/null @@ -1,169 +0,0 @@ -From 8549d4577f0573dddbc34e310c4310920a6bb714 Mon Sep 17 00:00:00 2001 -From: Benedikt Spranger -Date: Mon, 8 Mar 2010 18:57:04 +0100 -Subject: [PATCH 030/328] clocksource: TCLIB: Allow higher clock rates for - clock events -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -As default the TCLIB uses the 32KiHz base clock rate for clock events. -Add a compile time selection to allow higher clock resulution. - -(fixed up by Sami Pietikäinen ) - -Signed-off-by: Benedikt Spranger -Signed-off-by: Thomas Gleixner ---- - drivers/clocksource/tcb_clksrc.c | 36 +++++++++++++++++++------------- - drivers/misc/Kconfig | 12 +++++++++-- - 2 files changed, 31 insertions(+), 17 deletions(-) - -diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c -index de6baf564dfe..ba15242a6066 100644 ---- a/drivers/clocksource/tcb_clksrc.c -+++ b/drivers/clocksource/tcb_clksrc.c -@@ -25,8 +25,7 @@ - * this 32 bit free-running counter. the second channel is not used. - * - * - The third channel may be used to provide a 16-bit clockevent -- * source, used in either periodic or oneshot mode. This runs -- * at 32 KiHZ, and can handle delays of up to two seconds. -+ * source, used in either periodic or oneshot mode. - * - * A boot clocksource and clockevent source are also currently needed, - * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so -@@ -127,6 +126,7 @@ struct tc_clkevt_device { - struct clock_event_device clkevt; - struct clk *clk; - bool clk_enabled; -+ u32 freq; - void __iomem *regs; - }; - -@@ -135,13 +135,6 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt) - return container_of(clkevt, struct tc_clkevt_device, clkevt); - } - --/* For now, we always use the 32K clock ... this optimizes for NO_HZ, -- * because using one of the divided clocks would usually mean the -- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). -- * -- * A divided clock could be good for high resolution timers, since -- * 30.5 usec resolution can seem "low". 
-- */ - static u32 timer_clock; - - static void tc_clk_disable(struct clock_event_device *d) -@@ -191,7 +184,7 @@ static int tc_set_oneshot(struct clock_event_device *d) - - tc_clk_enable(d); - -- /* slow clock, count up to RC, then irq and stop */ -+ /* count up to RC, then irq and stop */ - writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | - ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); - writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); -@@ -213,10 +206,10 @@ static int tc_set_periodic(struct clock_event_device *d) - */ - tc_clk_enable(d); - -- /* slow clock, count up to RC, then irq and restart */ -+ /* count up to RC, then irq and restart */ - writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, - regs + ATMEL_TC_REG(2, CMR)); -- writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); -+ writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); - - /* Enable clock and interrupts on RC compare */ - writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); -@@ -243,7 +236,11 @@ static struct tc_clkevt_device clkevt = { - .features = CLOCK_EVT_FEAT_PERIODIC | - CLOCK_EVT_FEAT_ONESHOT, - /* Should be lower than at91rm9200's system timer */ -+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK - .rating = 125, -+#else -+ .rating = 200, -+#endif - .set_next_event = tc_next_event, - .set_state_shutdown = tc_shutdown_clk_off, - .set_state_periodic = tc_set_periodic, -@@ -265,8 +262,9 @@ static irqreturn_t ch2_irq(int irq, void *handle) - return IRQ_NONE; - } - --static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) -+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) - { -+ unsigned divisor = atmel_tc_divisors[divisor_idx]; - int ret; - struct clk *t2_clk = tc->clk[2]; - int irq = tc->irq[2]; -@@ -287,7 +285,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) - clkevt.regs = tc->regs; - clkevt.clk = t2_clk; - -- timer_clock = clk32k_divisor_idx; -+ timer_clock = divisor_idx; -+ if (!divisor) -+ clkevt.freq = 32768; -+ else -+ clkevt.freq = clk_get_rate(t2_clk) / divisor; - - clkevt.clkevt.cpumask = cpumask_of(0); - -@@ -298,7 +300,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) - return ret; - } - -- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); -+ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); - - return ret; - } -@@ -435,7 +437,11 @@ static int __init tcb_clksrc_init(void) - goto err_disable_t1; - - /* channel 2: periodic and oneshot timer support */ -+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK - ret = setup_clkevents(tc, clk32k_divisor_idx); -+#else -+ ret = setup_clkevents(tc, best_divisor_idx); -+#endif - if (ret) - goto err_unregister_clksrc; - -diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig -index 3726eacdf65d..0900dec7ec04 100644 ---- a/drivers/misc/Kconfig -+++ b/drivers/misc/Kconfig -@@ -69,8 +69,7 @@ config ATMEL_TCB_CLKSRC - are combined to make a single 32-bit timer. - - When GENERIC_CLOCKEVENTS is defined, the third timer channel -- may be used as a clock event device supporting oneshot mode -- (delays of up to two seconds) based on the 32 KiHz clock. -+ may be used as a clock event device supporting oneshot mode. - - config ATMEL_TCB_CLKSRC_BLOCK - int -@@ -83,6 +82,15 @@ config ATMEL_TCB_CLKSRC_BLOCK - TC can be used for other purposes, such as PWM generation and - interval timing. 
- -+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK -+ bool "TC Block use 32 KiHz clock" -+ depends on ATMEL_TCB_CLKSRC -+ default y -+ help -+ Select this to use 32 KiHz base clock rate as TC block clock -+ source for clock events. -+ -+ - config DUMMY_IRQ - tristate "Dummy IRQ handler" - default n --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0031-timekeeping-Split-jiffies-seqlock.patch b/kernel/patches-4.19.x-rt/0031-timekeeping-Split-jiffies-seqlock.patch deleted file mode 100644 index 1086979bc..000000000 --- a/kernel/patches-4.19.x-rt/0031-timekeeping-Split-jiffies-seqlock.patch +++ /dev/null @@ -1,170 +0,0 @@ -From 4948d026b217faebc43bfe4a21cb3c5e781508ad Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 14 Feb 2013 22:36:59 +0100 -Subject: [PATCH 031/328] timekeeping: Split jiffies seqlock - -Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so -it can be taken in atomic context on RT. - -Signed-off-by: Thomas Gleixner ---- - kernel/time/jiffies.c | 7 ++++--- - kernel/time/tick-common.c | 10 ++++++---- - kernel/time/tick-sched.c | 19 ++++++++++++------- - kernel/time/timekeeping.c | 6 ++++-- - kernel/time/timekeeping.h | 3 ++- - 5 files changed, 28 insertions(+), 17 deletions(-) - -diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c -index 497719127bf9..62acb8914c9e 100644 ---- a/kernel/time/jiffies.c -+++ b/kernel/time/jiffies.c -@@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = { - .max_cycles = 10, - }; - --__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); -+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); -+__cacheline_aligned_in_smp seqcount_t jiffies_seq; - - #if (BITS_PER_LONG < 64) - u64 get_jiffies_64(void) -@@ -83,9 +84,9 @@ u64 get_jiffies_64(void) - u64 ret; - - do { -- seq = read_seqbegin(&jiffies_lock); -+ seq = read_seqcount_begin(&jiffies_seq); - ret = jiffies_64; -- } while (read_seqretry(&jiffies_lock, seq)); -+ } while (read_seqcount_retry(&jiffies_seq, seq)); - return ret; - } - EXPORT_SYMBOL(get_jiffies_64); -diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c -index a02e0f6b287c..32f5101f07ce 100644 ---- a/kernel/time/tick-common.c -+++ b/kernel/time/tick-common.c -@@ -79,13 +79,15 @@ int tick_is_oneshot_available(void) - static void tick_periodic(int cpu) - { - if (tick_do_timer_cpu == cpu) { -- write_seqlock(&jiffies_lock); -+ raw_spin_lock(&jiffies_lock); -+ write_seqcount_begin(&jiffies_seq); - - /* Keep track of the next tick event */ - tick_next_period = ktime_add(tick_next_period, tick_period); - - do_timer(1); -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - update_wall_time(); - } - -@@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) - ktime_t next; - - do { -- seq = read_seqbegin(&jiffies_lock); -+ seq = read_seqcount_begin(&jiffies_seq); - next = tick_next_period; -- } while (read_seqretry(&jiffies_lock, seq)); -+ } while (read_seqcount_retry(&jiffies_seq, seq)); - - clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); - -diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index 48403fb653c2..e774a49176cc 100644 ---- a/kernel/time/tick-sched.c -+++ b/kernel/time/tick-sched.c -@@ -68,7 +68,8 @@ static void tick_do_update_jiffies64(ktime_t now) - return; - - /* Reevaluate with jiffies_lock held */ -- write_seqlock(&jiffies_lock); -+ raw_spin_lock(&jiffies_lock); -+ write_seqcount_begin(&jiffies_seq); - - delta = ktime_sub(now, last_jiffies_update); - if 
(delta >= tick_period) { -@@ -94,10 +95,12 @@ static void tick_do_update_jiffies64(ktime_t now) - /* Keep the tick_next_period variable up to date */ - tick_next_period = ktime_add(last_jiffies_update, tick_period); - } else { -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - return; - } -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - update_wall_time(); - } - -@@ -108,12 +111,14 @@ static ktime_t tick_init_jiffy_update(void) - { - ktime_t period; - -- write_seqlock(&jiffies_lock); -+ raw_spin_lock(&jiffies_lock); -+ write_seqcount_begin(&jiffies_seq); - /* Did we start the jiffies update yet ? */ - if (last_jiffies_update == 0) - last_jiffies_update = tick_next_period; - period = last_jiffies_update; -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - return period; - } - -@@ -656,10 +661,10 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) - - /* Read jiffies and the time when jiffies were updated last */ - do { -- seq = read_seqbegin(&jiffies_lock); -+ seq = read_seqcount_begin(&jiffies_seq); - basemono = last_jiffies_update; - basejiff = jiffies; -- } while (read_seqretry(&jiffies_lock, seq)); -+ } while (read_seqcount_retry(&jiffies_seq, seq)); - ts->last_jiffies = basejiff; - ts->timer_expires_base = basemono; - -diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c -index 81ee5b83c920..512db778f442 100644 ---- a/kernel/time/timekeeping.c -+++ b/kernel/time/timekeeping.c -@@ -2394,8 +2394,10 @@ EXPORT_SYMBOL(hardpps); - */ - void xtime_update(unsigned long ticks) - { -- write_seqlock(&jiffies_lock); -+ raw_spin_lock(&jiffies_lock); -+ write_seqcount_begin(&jiffies_seq); - do_timer(ticks); -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - update_wall_time(); - } -diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h -index 141ab3ab0354..099737f6f10c 100644 ---- a/kernel/time/timekeeping.h -+++ b/kernel/time/timekeeping.h -@@ -25,7 +25,8 @@ static inline void sched_clock_resume(void) { } - extern void do_timer(unsigned long ticks); - extern void update_wall_time(void); - --extern seqlock_t jiffies_lock; -+extern raw_spinlock_t jiffies_lock; -+extern seqcount_t jiffies_seq; - - #define CS_NAME_LEN 32 - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0032-signal-Revert-ptrace-preempt-magic.patch b/kernel/patches-4.19.x-rt/0032-signal-Revert-ptrace-preempt-magic.patch deleted file mode 100644 index 3f73d08e0..000000000 --- a/kernel/patches-4.19.x-rt/0032-signal-Revert-ptrace-preempt-magic.patch +++ /dev/null @@ -1,37 +0,0 @@ -From dad624b7531ae0a0275cab3c82ea0d7c6a29cc7c Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 21 Sep 2011 19:57:12 +0200 -Subject: [PATCH 032/328] signal: Revert ptrace preempt magic - -Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more -than a bandaid around the ptrace design trainwreck. It's not a -correctness issue, it's merily a cosmetic bandaid. 
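Stepping back to the timekeeping change quoted a little earlier (0031-timekeeping-Split-jiffies-seqlock.patch): the split into a raw_spinlock_t for writers plus a bare seqcount_t for readers relies on the usual seqcount read/retry pattern, where the writer leaves the counter odd while an update is in flight and readers loop until they observe the same even value before and after reading the data. A minimal userspace sketch of that pattern follows; the names mirror the patch but the code is illustrative only, using C11 atomics with default seq_cst ordering and a pthread mutex in place of the kernel's raw_spinlock_t and lighter seqcount barriers.

/* Illustrative userspace sketch of the seqcount read/retry pattern; not the
 * kernel API.  Build with: gcc -pthread seqcount_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t jiffies_lock = PTHREAD_MUTEX_INITIALIZER; /* serializes writers */
static atomic_uint jiffies_seq;       /* odd while an update is in flight */
static _Atomic uint64_t jiffies64;    /* data protected by the sequence counter */

static void do_timer_tick(void)       /* writer side, cf. tick_do_update_jiffies64() */
{
	pthread_mutex_lock(&jiffies_lock);
	atomic_fetch_add(&jiffies_seq, 1);   /* like write_seqcount_begin(): seq goes odd */
	atomic_fetch_add(&jiffies64, 1);
	atomic_fetch_add(&jiffies_seq, 1);   /* like write_seqcount_end(): seq even again */
	pthread_mutex_unlock(&jiffies_lock);
}

static uint64_t get_jiffies_64_sketch(void)  /* reader side, cf. get_jiffies_64() */
{
	unsigned int seq;
	uint64_t ret;

	do {
		seq = atomic_load(&jiffies_seq);
		ret = atomic_load(&jiffies64);
		/* retry if a writer was active (seq odd) or ran in between (seq changed) */
	} while ((seq & 1) || atomic_load(&jiffies_seq) != seq);
	return ret;
}

int main(void)
{
	do_timer_tick();
	printf("jiffies64 = %llu\n", (unsigned long long)get_jiffies_64_sketch());
	return 0;
}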
- -Signed-off-by: Thomas Gleixner ---- - kernel/signal.c | 8 -------- - 1 file changed, 8 deletions(-) - -diff --git a/kernel/signal.c b/kernel/signal.c -index 08911bb6fe9a..5e278f1540ad 100644 ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -2103,15 +2103,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) - if (gstop_done && ptrace_reparented(current)) - do_notify_parent_cldstop(current, false, why); - -- /* -- * Don't want to allow preemption here, because -- * sys_ptrace() needs this task to be inactive. -- * -- * XXX: implement read_unlock_no_resched(). -- */ -- preempt_disable(); - read_unlock(&tasklist_lock); -- preempt_enable_no_resched(); - freezable_schedule(); - } else { - /* --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0033-net-sched-Use-msleep-instead-of-yield.patch b/kernel/patches-4.19.x-rt/0033-net-sched-Use-msleep-instead-of-yield.patch deleted file mode 100644 index f07dcdce6..000000000 --- a/kernel/patches-4.19.x-rt/0033-net-sched-Use-msleep-instead-of-yield.patch +++ /dev/null @@ -1,63 +0,0 @@ -From 5b974aebb7a0797ecc4c47dda6158e8c6788d50b Mon Sep 17 00:00:00 2001 -From: Marc Kleine-Budde -Date: Wed, 5 Mar 2014 00:49:47 +0100 -Subject: [PATCH 033/328] net: sched: Use msleep() instead of yield() - -On PREEMPT_RT enabled systems the interrupt handler run as threads at prio 50 -(by default). If a high priority userspace process tries to shut down a busy -network interface it might spin in a yield loop waiting for the device to -become idle. With the interrupt thread having a lower priority than the -looping process it might never be scheduled and so result in a deadlock on UP -systems. - -With Magic SysRq the following backtrace can be produced: - -> test_app R running 0 174 168 0x00000000 -> [] (__schedule+0x220/0x3fc) from [] (preempt_schedule_irq+0x48/0x80) -> [] (preempt_schedule_irq+0x48/0x80) from [] (svc_preempt+0x8/0x20) -> [] (svc_preempt+0x8/0x20) from [] (local_bh_enable+0x18/0x88) -> [] (local_bh_enable+0x18/0x88) from [] (dev_deactivate_many+0x220/0x264) -> [] (dev_deactivate_many+0x220/0x264) from [] (__dev_close_many+0x64/0xd4) -> [] (__dev_close_many+0x64/0xd4) from [] (__dev_close+0x28/0x3c) -> [] (__dev_close+0x28/0x3c) from [] (__dev_change_flags+0x88/0x130) -> [] (__dev_change_flags+0x88/0x130) from [] (dev_change_flags+0x10/0x48) -> [] (dev_change_flags+0x10/0x48) from [] (do_setlink+0x370/0x7ec) -> [] (do_setlink+0x370/0x7ec) from [] (rtnl_newlink+0x2b4/0x450) -> [] (rtnl_newlink+0x2b4/0x450) from [] (rtnetlink_rcv_msg+0x158/0x1f4) -> [] (rtnetlink_rcv_msg+0x158/0x1f4) from [] (netlink_rcv_skb+0xac/0xc0) -> [] (netlink_rcv_skb+0xac/0xc0) from [] (rtnetlink_rcv+0x18/0x24) -> [] (rtnetlink_rcv+0x18/0x24) from [] (netlink_unicast+0x13c/0x198) -> [] (netlink_unicast+0x13c/0x198) from [] (netlink_sendmsg+0x264/0x2e0) -> [] (netlink_sendmsg+0x264/0x2e0) from [] (sock_sendmsg+0x78/0x98) -> [] (sock_sendmsg+0x78/0x98) from [] (___sys_sendmsg.part.25+0x268/0x278) -> [] (___sys_sendmsg.part.25+0x268/0x278) from [] (__sys_sendmsg+0x48/0x78) -> [] (__sys_sendmsg+0x48/0x78) from [] (ret_fast_syscall+0x0/0x2c) - -This patch works around the problem by replacing yield() by msleep(1), giving -the interrupt thread time to finish, similar to other changes contained in the -rt patch set. Using wait_for_completion() instead would probably be a better -solution. 
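The failure mode described above, a high-priority task spinning in yield() while only a lower-priority thread can make the awaited condition true, can be reproduced outside the kernel. The program below is a hypothetical userspace analogue, not part of the patch: the main thread is boosted to SCHED_FIFO (priority 50 is chosen only to match the default irq-thread priority mentioned above) and, when pinned to a single CPU, a sched_yield() spin would starve the ordinary worker until the kernel's RT throttling intervenes, whereas sleeping briefly, as the msleep(1) change does, lets the worker finish promptly.

/* Hypothetical analogue of the yield() starvation described above.
 * Build: gcc -pthread yield_vs_sleep.c
 * Run pinned to one CPU and as root for SCHED_FIFO: taskset -c 0 ./a.out
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int done;                  /* set by the low-priority worker */

static void *worker(void *arg)           /* stands in for the threaded irq handler */
{
	(void)arg;
	usleep(10 * 1000);               /* pretend to finish the outstanding work */
	atomic_store(&done, 1);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct sched_param sp = { .sched_priority = 50 };

	pthread_create(&t, NULL, worker, NULL);   /* worker stays SCHED_OTHER */

	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
		perror("sched_setscheduler (needs root)");

	while (!atomic_load(&done)) {
		/* sched_yield();  a FIFO task yielding to nobody keeps running,
		 *                 so on one CPU the worker would be starved   */
		usleep(1000);            /* msleep(1) analogue: cedes the CPU */
	}

	pthread_join(t, NULL);
	puts("worker finished");
	return 0;
}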
- - -Signed-off-by: Marc Kleine-Budde -Signed-off-by: Sebastian Andrzej Siewior ---- - net/sched/sch_generic.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c -index 8a4d01e427a2..4ab20f1138fd 100644 ---- a/net/sched/sch_generic.c -+++ b/net/sched/sch_generic.c -@@ -1204,7 +1204,7 @@ void dev_deactivate_many(struct list_head *head) - /* Wait for outstanding qdisc_run calls. */ - list_for_each_entry(dev, head, close_list) { - while (some_qdisc_is_busy(dev)) -- yield(); -+ msleep(1); - /* The new qdisc is assigned at this point so we can safely - * unwind stale skb lists and qdisc statistics - */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch b/kernel/patches-4.19.x-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch deleted file mode 100644 index 99ed4d9ac..000000000 --- a/kernel/patches-4.19.x-rt/0034-dm-rq-remove-BUG_ON-irqs_disabled-check.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 38dbd44808bcdd34f0b973698b0f9bd65d2f2db5 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 27 Mar 2018 16:24:15 +0200 -Subject: [PATCH 034/328] dm rq: remove BUG_ON(!irqs_disabled) check - -In commit 052189a2ec95 ("dm: remove superfluous irq disablement in -dm_request_fn") the spin_lock_irq() was replaced with spin_lock() + a -check for disabled interrupts. Later the locking part was removed in -commit 2eb6e1e3aa87 ("dm: submit stacked requests in irq enabled -context") but the BUG_ON() check remained. - -Since the original purpose for the "are-irqs-off" check is gone (the -->queue_lock has been removed) remove it. - -Cc: Keith Busch -Cc: Mike Snitzer -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/md/dm-rq.c | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c -index 4d36373e1c0f..12ed08245130 100644 ---- a/drivers/md/dm-rq.c -+++ b/drivers/md/dm-rq.c -@@ -692,7 +692,6 @@ static void dm_old_request_fn(struct request_queue *q) - /* Establish tio->ti before queuing work (map_tio_request) */ - tio->ti = ti; - kthread_queue_work(&md->kworker, &tio->work); -- BUG_ON(!irqs_disabled()); - } - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch b/kernel/patches-4.19.x-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch deleted file mode 100644 index 5df7c8925..000000000 --- a/kernel/patches-4.19.x-rt/0035-usb-do-no-disable-interrupts-in-giveback.patch +++ /dev/null @@ -1,45 +0,0 @@ -From f31d5f36bfd80c261ba37fe3b8849f2be819c088 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 8 Nov 2013 17:34:54 +0100 -Subject: [PATCH 035/328] usb: do no disable interrupts in giveback - -Since commit 94dfd7ed ("USB: HCD: support giveback of URB in tasklet -context") the USB code disables interrupts before invoking the complete -callback. -This should not be required the HCD completes the URBs either in hard-irq -context or in BH context. Lockdep may report false positives if one has two -HCDs (one completes in IRQ and the other in BH context) and is using the same -USB driver (device) with both HCDs. This is safe since the same URBs are never -mixed with those two HCDs. -Longeterm we should force all HCDs to complete in the same context. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/usb/core/hcd.c | 3 --- - 1 file changed, 3 deletions(-) - -diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c -index b82a7d787add..2f3015356124 100644 ---- a/drivers/usb/core/hcd.c -+++ b/drivers/usb/core/hcd.c -@@ -1738,7 +1738,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb) - struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); - struct usb_anchor *anchor = urb->anchor; - int status = urb->unlinked; -- unsigned long flags; - - urb->hcpriv = NULL; - if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) && -@@ -1766,9 +1765,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb) - * and no one may trigger the above deadlock situation when - * running complete() in tasklet. - */ -- local_irq_save(flags); - urb->complete(urb); -- local_irq_restore(flags); - - usb_anchor_resume_wakeups(anchor); - atomic_dec(&urb->use_count); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch b/kernel/patches-4.19.x-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch deleted file mode 100644 index d5b2435b1..000000000 --- a/kernel/patches-4.19.x-rt/0036-rt-Provide-PREEMPT_RT_BASE-config-switch.patch +++ /dev/null @@ -1,63 +0,0 @@ -From f93f63735dec865d4013677969324e66da7f02c4 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 17 Jun 2011 12:39:57 +0200 -Subject: [PATCH 036/328] rt: Provide PREEMPT_RT_BASE config switch - -Introduce PREEMPT_RT_BASE which enables parts of -PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT -substitutions for testing. - -Signed-off-by: Thomas Gleixner ---- - kernel/Kconfig.preempt | 21 ++++++++++++++++++--- - 1 file changed, 18 insertions(+), 3 deletions(-) - -diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt -index cd1655122ec0..027db5976c2f 100644 ---- a/kernel/Kconfig.preempt -+++ b/kernel/Kconfig.preempt -@@ -1,3 +1,10 @@ -+config PREEMPT -+ bool -+ select PREEMPT_COUNT -+ -+config PREEMPT_RT_BASE -+ bool -+ select PREEMPT - - choice - prompt "Preemption Model" -@@ -34,10 +41,10 @@ config PREEMPT_VOLUNTARY - - Select this if you are building a kernel for a desktop system. - --config PREEMPT -+config PREEMPT__LL - bool "Preemptible Kernel (Low-Latency Desktop)" - depends on !ARCH_NO_PREEMPT -- select PREEMPT_COUNT -+ select PREEMPT - select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK - help - This option reduces the latency of the kernel by making -@@ -54,7 +61,15 @@ config PREEMPT - embedded system with latency requirements in the milliseconds - range. - -+config PREEMPT_RTB -+ bool "Preemptible Kernel (Basic RT)" -+ select PREEMPT_RT_BASE -+ help -+ This option is basically the same as (Low-Latency Desktop) but -+ enables changes which are preliminary for the full preemptible -+ RT kernel. 
-+ - endchoice - - config PREEMPT_COUNT -- bool -\ No newline at end of file -+ bool --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch b/kernel/patches-4.19.x-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch deleted file mode 100644 index 298ed7566..000000000 --- a/kernel/patches-4.19.x-rt/0037-cpumask-Disable-CONFIG_CPUMASK_OFFSTACK-for-RT.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 824fc9b2ae92b317da3e2a42406a49f330e20a6d Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 14 Dec 2011 01:03:49 +0100 -Subject: [PATCH 037/328] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT - -There are "valid" GFP_ATOMIC allocations such as - -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931 -|in_atomic(): 1, irqs_disabled(): 0, pid: 2130, name: tar -|1 lock held by tar/2130: -| #0: (&mm->mmap_sem){++++++}, at: [] SyS_brk+0x39/0x190 -|Preemption disabled at:[] flush_tlb_mm_range+0x28/0x350 -| -|CPU: 1 PID: 2130 Comm: tar Tainted: G W 4.8.2-rt2+ #747 -|Call Trace: -| [] dump_stack+0x86/0xca -| [] ___might_sleep+0x14b/0x240 -| [] rt_spin_lock+0x24/0x60 -| [] get_page_from_freelist+0x83a/0x11b0 -| [] __alloc_pages_nodemask+0x15b/0x1190 -| [] alloc_pages_current+0xa1/0x1f0 -| [] new_slab+0x3e5/0x690 -| [] ___slab_alloc+0x495/0x660 -| [] __slab_alloc.isra.79+0x71/0xc0 -| [] __kmalloc_node+0xe7/0x240 -| [] alloc_cpumask_var_node+0x20/0x50 -| [] alloc_cpumask_var+0xe/0x10 -| [] native_send_call_func_ipi+0x21/0x130 -| [] smp_call_function_many+0x22f/0x370 -| [] native_flush_tlb_others+0x1a4/0x3a0 -| [] flush_tlb_mm_range+0x7b/0x350 -| [] tlb_flush_mmu_tlbonly+0x62/0xd0 -| [] tlb_finish_mmu+0x14/0x50 -| [] unmap_region+0xe4/0x110 -| [] do_munmap+0x293/0x470 -| [] SyS_brk+0x13c/0x190 -| [] do_fast_syscall_32+0xb2/0x2f0 -| [] entry_SYSENTER_compat+0x51/0x60 - -which forbid allocations at run-time. - -Signed-off-by: Thomas Gleixner ---- - arch/x86/Kconfig | 2 +- - lib/Kconfig | 1 + - 2 files changed, 2 insertions(+), 1 deletion(-) - -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index af35f5caadbe..e40ba59efe7f 100644 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -934,7 +934,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT - config MAXSMP - bool "Enable Maximum number of SMP Processors and NUMA Nodes" - depends on X86_64 && SMP && DEBUG_KERNEL -- select CPUMASK_OFFSTACK -+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL - ---help--- - Enable maximum number of CPUS and NUMA Nodes for this architecture. - If unsure, say N. -diff --git a/lib/Kconfig b/lib/Kconfig -index a3928d4438b5..a50b2158f7cd 100644 ---- a/lib/Kconfig -+++ b/lib/Kconfig -@@ -441,6 +441,7 @@ config CHECK_SIGNATURE - - config CPUMASK_OFFSTACK - bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS -+ depends on !PREEMPT_RT_FULL - help - Use dynamic allocation for cpumask_var_t, instead of putting - them on the stack. 
This is a bit more expensive, but avoids --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0038-jump-label-disable-if-stop_machine-is-used.patch b/kernel/patches-4.19.x-rt/0038-jump-label-disable-if-stop_machine-is-used.patch deleted file mode 100644 index 1af339a2b..000000000 --- a/kernel/patches-4.19.x-rt/0038-jump-label-disable-if-stop_machine-is-used.patch +++ /dev/null @@ -1,41 +0,0 @@ -From feb8e4e9bfee1c054ec0c83ae2a12897d85da9b3 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 8 Jul 2015 17:14:48 +0200 -Subject: [PATCH 038/328] jump-label: disable if stop_machine() is used - -Some architectures are using stop_machine() while switching the opcode which -leads to latency spikes. -The architectures which use stop_machine() atm: -- ARM stop machine -- s390 stop machine - -The architecures which use other sorcery: -- MIPS -- X86 -- powerpc -- sparc -- arm64 - -Signed-off-by: Thomas Gleixner -[bigeasy: only ARM for now] -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index e2f7c50dbace..91ba9fe945ff 100644 ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -51,7 +51,7 @@ config ARM - select HARDIRQS_SW_RESEND - select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) - select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 -- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU -+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE - select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU - select HAVE_ARCH_MMAP_RND_BITS if MMU - select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch b/kernel/patches-4.19.x-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch deleted file mode 100644 index 2bed68593..000000000 --- a/kernel/patches-4.19.x-rt/0039-kconfig-Disable-config-options-which-are-not-RT-comp.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 521872fd8b19b111a7001f6388912ab9d506f741 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 24 Jul 2011 12:11:43 +0200 -Subject: [PATCH 039/328] kconfig: Disable config options which are not RT - compatible - -Disable stuff which is known to have issues on RT - -Signed-off-by: Thomas Gleixner ---- - arch/Kconfig | 1 + - mm/Kconfig | 2 +- - 2 files changed, 2 insertions(+), 1 deletion(-) - -diff --git a/arch/Kconfig b/arch/Kconfig -index a336548487e6..3f537b264852 100644 ---- a/arch/Kconfig -+++ b/arch/Kconfig -@@ -28,6 +28,7 @@ config OPROFILE - tristate "OProfile system profiling" - depends on PROFILING - depends on HAVE_OPROFILE -+ depends on !PREEMPT_RT_FULL - select RING_BUFFER - select RING_BUFFER_ALLOW_SWAP - help -diff --git a/mm/Kconfig b/mm/Kconfig -index b457e94ae618..0dddbb2a3282 100644 ---- a/mm/Kconfig -+++ b/mm/Kconfig -@@ -377,7 +377,7 @@ config NOMMU_INITIAL_TRIM_EXCESS - - config TRANSPARENT_HUGEPAGE - bool "Transparent Hugepage Support" -- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE -+ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL - select COMPACTION - select RADIX_TREE_MULTIORDER - help --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0040-lockdep-disable-self-test.patch b/kernel/patches-4.19.x-rt/0040-lockdep-disable-self-test.patch deleted file mode 100644 index b42b5849d..000000000 --- a/kernel/patches-4.19.x-rt/0040-lockdep-disable-self-test.patch +++ /dev/null @@ -1,34 +0,0 
@@ -From 464fd34dd6dc53c357221cff371377ca19601e14 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 17 Oct 2017 16:36:18 +0200 -Subject: [PATCH 040/328] lockdep: disable self-test -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -The self-test wasn't always 100% accurate for RT. We disabled a few -tests which failed because they had a different semantic for RT. Some -still reported false positives. Now the selftest locks up the system -during boot and it needs to be investigated… - -Signed-off-by: Sebastian Andrzej Siewior ---- - lib/Kconfig.debug | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug -index 46a910acce3f..38cf7f81daa7 100644 ---- a/lib/Kconfig.debug -+++ b/lib/Kconfig.debug -@@ -1207,7 +1207,7 @@ config DEBUG_ATOMIC_SLEEP - - config DEBUG_LOCKING_API_SELFTESTS - bool "Locking API boot-time self-tests" -- depends on DEBUG_KERNEL -+ depends on DEBUG_KERNEL && !PREEMPT_RT_FULL - help - Say Y here if you want the kernel to run a short self-test during - bootup. The self-test checks whether common types of locking bugs --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0041-mm-Allow-only-slub-on-RT.patch b/kernel/patches-4.19.x-rt/0041-mm-Allow-only-slub-on-RT.patch deleted file mode 100644 index de669f423..000000000 --- a/kernel/patches-4.19.x-rt/0041-mm-Allow-only-slub-on-RT.patch +++ /dev/null @@ -1,36 +0,0 @@ -From bb8c948b260e99e7c7ad2dc38ea03a958be18769 Mon Sep 17 00:00:00 2001 -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:44:03 -0500 -Subject: [PATCH 041/328] mm: Allow only slub on RT - -Disable SLAB and SLOB on -RT. Only SLUB is adopted to -RT needs. - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - init/Kconfig | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/init/Kconfig b/init/Kconfig -index 47035b5a46f6..ae9a0113a699 100644 ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -1637,6 +1637,7 @@ choice - - config SLAB - bool "SLAB" -+ depends on !PREEMPT_RT_FULL - select HAVE_HARDENED_USERCOPY_ALLOCATOR - help - The regular slab allocator that is established and known to work -@@ -1657,6 +1658,7 @@ config SLUB - config SLOB - depends on EXPERT - bool "SLOB (Simple Allocator)" -+ depends on !PREEMPT_RT_FULL - help - SLOB replaces the stock allocator with a drastically simpler - allocator. SLOB is generally more space efficient but --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0042-locking-Disable-spin-on-owner-for-RT.patch b/kernel/patches-4.19.x-rt/0042-locking-Disable-spin-on-owner-for-RT.patch deleted file mode 100644 index a473ecb25..000000000 --- a/kernel/patches-4.19.x-rt/0042-locking-Disable-spin-on-owner-for-RT.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 6bbedb933d43f1bc2283d96523412298d765b8a2 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 21:51:45 +0200 -Subject: [PATCH 042/328] locking: Disable spin on owner for RT -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Drop spin on owner for mutex / rwsem. 
We are most likely not using it -but… - -Signed-off-by: Thomas Gleixner ---- - kernel/Kconfig.locks | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks -index 84d882f3e299..af27c4000812 100644 ---- a/kernel/Kconfig.locks -+++ b/kernel/Kconfig.locks -@@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW - - config MUTEX_SPIN_ON_OWNER - def_bool y -- depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW -+ depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL - - config RWSEM_SPIN_ON_OWNER - def_bool y -- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW -+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL - - config LOCK_SPIN_ON_OWNER - def_bool y --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch b/kernel/patches-4.19.x-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch deleted file mode 100644 index 333cc4080..000000000 --- a/kernel/patches-4.19.x-rt/0043-rcu-Disable-RCU_FAST_NO_HZ-on-RT.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 0942d8d1880802a3a19df4dfdff1ec5769d92fe3 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 28 Oct 2012 13:26:09 +0000 -Subject: [PATCH 043/328] rcu: Disable RCU_FAST_NO_HZ on RT - -This uses a timer_list timer from the irq disabled guts of the idle -code. Disable it for now to prevent wreckage. - -Signed-off-by: Thomas Gleixner ---- - kernel/rcu/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig -index 9210379c0353..644264be90f0 100644 ---- a/kernel/rcu/Kconfig -+++ b/kernel/rcu/Kconfig -@@ -172,7 +172,7 @@ config RCU_FANOUT_LEAF - - config RCU_FAST_NO_HZ - bool "Accelerate last non-dyntick-idle CPU's grace periods" -- depends on NO_HZ_COMMON && SMP && RCU_EXPERT -+ depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL - default n - help - This option permits CPUs to enter dynticks-idle state even if --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch b/kernel/patches-4.19.x-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch deleted file mode 100644 index a5e8953a8..000000000 --- a/kernel/patches-4.19.x-rt/0044-rcu-make-RCU_BOOST-default-on-RT.patch +++ /dev/null @@ -1,33 +0,0 @@ -From b784c987142020d5cc32de03823004d362b390ec Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 21 Mar 2014 20:19:05 +0100 -Subject: [PATCH 044/328] rcu: make RCU_BOOST default on RT - -Since it is no longer invoked from the softirq people run into OOM more -often if the priority of the RCU thread is too low. Making boosting -default on RT should help in those case and it can be switched off if -someone knows better. - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/Kconfig | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig -index 644264be90f0..a243a78ff38c 100644 ---- a/kernel/rcu/Kconfig -+++ b/kernel/rcu/Kconfig -@@ -190,8 +190,8 @@ config RCU_FAST_NO_HZ - - config RCU_BOOST - bool "Enable RCU priority boosting" -- depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT -- default n -+ depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT_FULL -+ default y if PREEMPT_RT_FULL - help - This option boosts the priority of preempted RCU readers that - block the current preemptible RCU grace period for too long. 
--- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch b/kernel/patches-4.19.x-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch deleted file mode 100644 index 22a79886f..000000000 --- a/kernel/patches-4.19.x-rt/0045-sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 648e8c04474df9ed71c649af1d1e5a161cddaf41 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Mon, 18 Jul 2011 17:03:52 +0200 -Subject: [PATCH 045/328] sched: Disable CONFIG_RT_GROUP_SCHED on RT - -Carsten reported problems when running: - - taskset 01 chrt -f 1 sleep 1 - -from within rc.local on a F15 machine. The task stays running and -never gets on the run queue because some of the run queues have -rt_throttled=1 which does not go away. Works nice from a ssh login -shell. Disabling CONFIG_RT_GROUP_SCHED solves that as well. - -Signed-off-by: Thomas Gleixner ---- - init/Kconfig | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/init/Kconfig b/init/Kconfig -index ae9a0113a699..61e8b531649b 100644 ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -784,6 +784,7 @@ config CFS_BANDWIDTH - config RT_GROUP_SCHED - bool "Group scheduling for SCHED_RR/FIFO" - depends on CGROUP_SCHED -+ depends on !PREEMPT_RT_FULL - default n - help - This feature lets you explicitly allocate real CPU bandwidth --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch b/kernel/patches-4.19.x-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch deleted file mode 100644 index f2fd8f604..000000000 --- a/kernel/patches-4.19.x-rt/0046-net-core-disable-NET_RX_BUSY_POLL.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 0b90609a04c39529c4ff712a4786aecde55a0733 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Sat, 27 May 2017 19:02:06 +0200 -Subject: [PATCH 046/328] net/core: disable NET_RX_BUSY_POLL -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -sk_busy_loop() does preempt_disable() followed by a few operations which can -take sleeping locks and may get long. -I _think_ that we could use preempt_disable_nort() (in sk_busy_loop()) instead -but after a successfull cmpxchg(&napi->state, …) we would gain the ressource -and could be scheduled out. At this point nobody knows who (which context) owns -it and so it could take a while until the state is realeased and napi_poll() -could be invoked again. - -Signed-off-by: Sebastian Andrzej Siewior ---- - net/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/net/Kconfig b/net/Kconfig -index 228dfa382eec..bc8d01996f22 100644 ---- a/net/Kconfig -+++ b/net/Kconfig -@@ -275,7 +275,7 @@ config CGROUP_NET_CLASSID - - config NET_RX_BUSY_POLL - bool -- default y -+ default y if !PREEMPT_RT_FULL - - config BQL - bool --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0047-arm-disable-NEON-in-kernel-mode.patch b/kernel/patches-4.19.x-rt/0047-arm-disable-NEON-in-kernel-mode.patch deleted file mode 100644 index e766e5474..000000000 --- a/kernel/patches-4.19.x-rt/0047-arm-disable-NEON-in-kernel-mode.patch +++ /dev/null @@ -1,165 +0,0 @@ -From af731f1e8edb7e93c5977a0da70bd61c5d9fa7b1 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 1 Dec 2017 10:42:03 +0100 -Subject: [PATCH 047/328] arm*: disable NEON in kernel mode - -NEON in kernel mode is used by the crypto algorithms and raid6 code. 
-While the raid6 code looks okay, the crypto algorithms do not: NEON -is enabled on first invocation and may allocate/free/map memory before -the NEON mode is disabled again. -This needs to be changed until it can be enabled. -On ARM NEON in kernel mode can be simply disabled. on ARM64 it needs to -stay on due to possible EFI callbacks so here I disable each algorithm. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/Kconfig | 2 +- - arch/arm64/crypto/Kconfig | 28 ++++++++++++++-------------- - arch/arm64/crypto/crc32-ce-glue.c | 3 ++- - 3 files changed, 17 insertions(+), 16 deletions(-) - -diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 91ba9fe945ff..bd9d180db5c7 100644 ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -2163,7 +2163,7 @@ config NEON - - config KERNEL_MODE_NEON - bool "Support for NEON in kernel mode" -- depends on NEON && AEABI -+ depends on NEON && AEABI && !PREEMPT_RT_BASE - help - Say Y to include support for NEON in kernel mode. - -diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig -index d51944ff9f91..0d4b3f0cfba6 100644 ---- a/arch/arm64/crypto/Kconfig -+++ b/arch/arm64/crypto/Kconfig -@@ -19,43 +19,43 @@ config CRYPTO_SHA512_ARM64 - - config CRYPTO_SHA1_ARM64_CE - tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_HASH - select CRYPTO_SHA1 - - config CRYPTO_SHA2_ARM64_CE - tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_HASH - select CRYPTO_SHA256_ARM64 - - config CRYPTO_SHA512_ARM64_CE - tristate "SHA-384/SHA-512 digest algorithm (ARMv8 Crypto Extensions)" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_HASH - select CRYPTO_SHA512_ARM64 - - config CRYPTO_SHA3_ARM64 - tristate "SHA3 digest algorithm (ARMv8.2 Crypto Extensions)" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_HASH - select CRYPTO_SHA3 - - config CRYPTO_SM3_ARM64_CE - tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_HASH - select CRYPTO_SM3 - - config CRYPTO_SM4_ARM64_CE - tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_ALGAPI - select CRYPTO_SM4 - - config CRYPTO_GHASH_ARM64_CE - tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_HASH - select CRYPTO_GF128MUL - select CRYPTO_AES -@@ -63,7 +63,7 @@ config CRYPTO_GHASH_ARM64_CE - - config CRYPTO_CRCT10DIF_ARM64_CE - tristate "CRCT10DIF digest algorithm using PMULL instructions" -- depends on KERNEL_MODE_NEON && CRC_T10DIF -+ depends on KERNEL_MODE_NEON && CRC_T10DIF && !PREEMPT_RT_BASE - select CRYPTO_HASH - - config CRYPTO_CRC32_ARM64_CE -@@ -77,13 +77,13 @@ config CRYPTO_AES_ARM64 - - config CRYPTO_AES_ARM64_CE - tristate "AES core cipher using ARMv8 Crypto Extensions" -- depends on ARM64 && KERNEL_MODE_NEON -+ depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_ALGAPI - select CRYPTO_AES_ARM64 - - config CRYPTO_AES_ARM64_CE_CCM - tristate "AES in CCM mode using ARMv8 Crypto Extensions" -- depends on ARM64 && KERNEL_MODE_NEON 
-+ depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_ALGAPI - select CRYPTO_AES_ARM64_CE - select CRYPTO_AES_ARM64 -@@ -91,7 +91,7 @@ config CRYPTO_AES_ARM64_CE_CCM - - config CRYPTO_AES_ARM64_CE_BLK - tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_BLKCIPHER - select CRYPTO_AES_ARM64_CE - select CRYPTO_AES_ARM64 -@@ -99,7 +99,7 @@ config CRYPTO_AES_ARM64_CE_BLK - - config CRYPTO_AES_ARM64_NEON_BLK - tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_BLKCIPHER - select CRYPTO_AES_ARM64 - select CRYPTO_AES -@@ -107,13 +107,13 @@ config CRYPTO_AES_ARM64_NEON_BLK - - config CRYPTO_CHACHA20_NEON - tristate "NEON accelerated ChaCha20 symmetric cipher" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_BLKCIPHER - select CRYPTO_CHACHA20 - - config CRYPTO_AES_ARM64_BS - tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm" -- depends on KERNEL_MODE_NEON -+ depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE - select CRYPTO_BLKCIPHER - select CRYPTO_AES_ARM64_NEON_BLK - select CRYPTO_AES_ARM64 -diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c -index 34b4e3d46aab..ae055cdad8cf 100644 ---- a/arch/arm64/crypto/crc32-ce-glue.c -+++ b/arch/arm64/crypto/crc32-ce-glue.c -@@ -208,7 +208,8 @@ static struct shash_alg crc32_pmull_algs[] = { { - - static int __init crc32_pmull_mod_init(void) - { -- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) { -+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && -+ !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && (elf_hwcap & HWCAP_PMULL)) { - crc32_pmull_algs[0].update = crc32_pmull_update; - crc32_pmull_algs[1].update = crc32c_pmull_update; - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch b/kernel/patches-4.19.x-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch deleted file mode 100644 index 907e877bf..000000000 --- a/kernel/patches-4.19.x-rt/0048-powerpc-Use-generic-rwsem-on-RT.patch +++ /dev/null @@ -1,32 +0,0 @@ -From c90bc1f0bbce77f2baf2b4213125fb5b7870fc20 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 048/328] powerpc: Use generic rwsem on RT - -Use generic code which uses rtmutex - -Signed-off-by: Thomas Gleixner ---- - arch/powerpc/Kconfig | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig -index 6f475dc5829b..3d5c86336072 100644 ---- a/arch/powerpc/Kconfig -+++ b/arch/powerpc/Kconfig -@@ -105,10 +105,11 @@ config LOCKDEP_SUPPORT - - config RWSEM_GENERIC_SPINLOCK - bool -+ default y if PREEMPT_RT_FULL - - config RWSEM_XCHGADD_ALGORITHM - bool -- default y -+ default y if !PREEMPT_RT_FULL - - config GENERIC_LOCKBREAK - bool --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/kernel/patches-4.19.x-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch deleted file mode 100644 index f63cc135d..000000000 --- a/kernel/patches-4.19.x-rt/0049-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 3460880af8146f0e3e05acd590e7e52d450bbf80 Mon Sep 17 00:00:00 2001 -From: Bogdan Purcareata -Date: Fri, 24 Apr 2015 15:53:13 +0000 -Subject: [PATCH 
049/328] powerpc/kvm: Disable in-kernel MPIC emulation for - PREEMPT_RT_FULL - -While converting the openpic emulation code to use a raw_spinlock_t enables -guests to run on RT, there's still a performance issue. For interrupts sent in -directed delivery mode with a multiple CPU mask, the emulated openpic will loop -through all of the VCPUs, and for each VCPUs, it call IRQ_check, which will loop -through all the pending interrupts for that VCPU. This is done while holding the -raw_lock, meaning that in all this time the interrupts and preemption are -disabled on the host Linux. A malicious user app can max both these number and -cause a DoS. - -This temporary fix is sent for two reasons. First is so that users who want to -use the in-kernel MPIC emulation are aware of the potential latencies, thus -making sure that the hardware MPIC and their usage scenario does not involve -interrupts sent in directed delivery mode, and the number of possible pending -interrupts is kept small. Secondly, this should incentivize the development of a -proper openpic emulation that would be better suited for RT. - -Acked-by: Scott Wood -Signed-off-by: Bogdan Purcareata -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/powerpc/kvm/Kconfig | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig -index 68a0e9d5b440..6f4d5d7615af 100644 ---- a/arch/powerpc/kvm/Kconfig -+++ b/arch/powerpc/kvm/Kconfig -@@ -178,6 +178,7 @@ config KVM_E500MC - config KVM_MPIC - bool "KVM in-kernel MPIC emulation" - depends on KVM && E500 -+ depends on !PREEMPT_RT_FULL - select HAVE_KVM_IRQCHIP - select HAVE_KVM_IRQFD - select HAVE_KVM_IRQ_ROUTING --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0050-powerpc-Disable-highmem-on-RT.patch b/kernel/patches-4.19.x-rt/0050-powerpc-Disable-highmem-on-RT.patch deleted file mode 100644 index a5c4b4983..000000000 --- a/kernel/patches-4.19.x-rt/0050-powerpc-Disable-highmem-on-RT.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 13e6a60aad3edc7b4efd2168abcca0447ff20763 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Mon, 18 Jul 2011 17:08:34 +0200 -Subject: [PATCH 050/328] powerpc: Disable highmem on RT - -The current highmem handling on -RT is not compatible and needs fixups. - -Signed-off-by: Thomas Gleixner ---- - arch/powerpc/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig -index 3d5c86336072..1b332f69dd36 100644 ---- a/arch/powerpc/Kconfig -+++ b/arch/powerpc/Kconfig -@@ -399,7 +399,7 @@ menu "Kernel options" - - config HIGHMEM - bool "High memory support" -- depends on PPC32 -+ depends on PPC32 && !PREEMPT_RT_FULL - - source kernel/Kconfig.hz - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0051-mips-Disable-highmem-on-RT.patch b/kernel/patches-4.19.x-rt/0051-mips-Disable-highmem-on-RT.patch deleted file mode 100644 index 168f53b19..000000000 --- a/kernel/patches-4.19.x-rt/0051-mips-Disable-highmem-on-RT.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 55ff21a4418f35a443f2c210779a9ff4dee33e93 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Mon, 18 Jul 2011 17:10:12 +0200 -Subject: [PATCH 051/328] mips: Disable highmem on RT - -The current highmem handling on -RT is not compatible and needs fixups. 
- -Signed-off-by: Thomas Gleixner ---- - arch/mips/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig -index a830a9701e50..3d5fae3891be 100644 ---- a/arch/mips/Kconfig -+++ b/arch/mips/Kconfig -@@ -2518,7 +2518,7 @@ config MIPS_CRC_SUPPORT - # - config HIGHMEM - bool "High Memory Support" -- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA -+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL - - config CPU_SUPPORTS_HIGHMEM - bool --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch b/kernel/patches-4.19.x-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch deleted file mode 100644 index 198cf07af..000000000 --- a/kernel/patches-4.19.x-rt/0052-x86-Use-generic-rwsem_spinlocks-on-rt.patch +++ /dev/null @@ -1,33 +0,0 @@ -From d0b5d43931b3de89c64c8a697256eb60eb9c0ebb Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 26 Jul 2009 02:21:32 +0200 -Subject: [PATCH 052/328] x86: Use generic rwsem_spinlocks on -rt - -Simplifies the separation of anon_rw_semaphores and rw_semaphores for --rt. - -Signed-off-by: Thomas Gleixner ---- - arch/x86/Kconfig | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index e40ba59efe7f..f22e787329cf 100644 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -264,8 +264,11 @@ config ARCH_MAY_HAVE_PC_FDC - def_bool y - depends on ISA_DMA_API - -+config RWSEM_GENERIC_SPINLOCK -+ def_bool PREEMPT_RT_FULL -+ - config RWSEM_XCHGADD_ALGORITHM -- def_bool y -+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL - - config GENERIC_CALIBRATE_DELAY - def_bool y --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch b/kernel/patches-4.19.x-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch deleted file mode 100644 index e66474b3e..000000000 --- a/kernel/patches-4.19.x-rt/0053-leds-trigger-disable-CPU-trigger-on-RT.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 9c164cac4dbebd9bf5376428113db97b366625a0 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 23 Jan 2014 14:45:59 +0100 -Subject: [PATCH 053/328] leds: trigger: disable CPU trigger on -RT - -as it triggers: -|CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141 -|[] (unwind_backtrace+0x0/0xf8) from [] (show_stack+0x1c/0x20) -|[] (show_stack+0x1c/0x20) from [] (dump_stack+0x20/0x2c) -|[] (dump_stack+0x20/0x2c) from [] (__might_sleep+0x13c/0x170) -|[] (__might_sleep+0x13c/0x170) from [] (__rt_spin_lock+0x28/0x38) -|[] (__rt_spin_lock+0x28/0x38) from [] (rt_read_lock+0x68/0x7c) -|[] (rt_read_lock+0x68/0x7c) from [] (led_trigger_event+0x2c/0x5c) -|[] (led_trigger_event+0x2c/0x5c) from [] (ledtrig_cpu+0x54/0x5c) -|[] (ledtrig_cpu+0x54/0x5c) from [] (arch_cpu_idle_exit+0x18/0x1c) -|[] (arch_cpu_idle_exit+0x18/0x1c) from [] (cpu_startup_entry+0xa8/0x234) -|[] (cpu_startup_entry+0xa8/0x234) from [] (rest_init+0xb8/0xe0) -|[] (rest_init+0xb8/0xe0) from [] (start_kernel+0x2c4/0x380) - - -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/leds/trigger/Kconfig | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig -index 4018af769969..b4ce8c115949 100644 ---- a/drivers/leds/trigger/Kconfig -+++ b/drivers/leds/trigger/Kconfig -@@ -63,6 +63,7 @@ config LEDS_TRIGGER_BACKLIGHT - - config LEDS_TRIGGER_CPU - bool "LED CPU Trigger" -+ 
depends on !PREEMPT_RT_BASE - help - This allows LEDs to be controlled by active CPUs. This shows - the active CPUs across an array of LEDs so you can see which --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/kernel/patches-4.19.x-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch deleted file mode 100644 index 225290bf5..000000000 --- a/kernel/patches-4.19.x-rt/0054-cpufreq-drop-K8-s-driver-from-beeing-selected.patch +++ /dev/null @@ -1,38 +0,0 @@ -From fa67192faa15cd98f554bcf82f0ecc40a26d9165 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 9 Apr 2015 15:23:01 +0200 -Subject: [PATCH 054/328] cpufreq: drop K8's driver from beeing selected - -Ralf posted a picture of a backtrace from - -| powernowk8_target_fn() -> transition_frequency_fidvid() and then at the -| end: -| 932 policy = cpufreq_cpu_get(smp_processor_id()); -| 933 cpufreq_cpu_put(policy); - -crashing the system on -RT. I assumed that policy was a NULL pointer but -was rulled out. Since Ralf can't do any more investigations on this and -I have no machine with this, I simply switch it off. - -Reported-by: Ralf Mardorf -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/cpufreq/Kconfig.x86 | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 -index 35f71825b7f3..bb4a6160d0f7 100644 ---- a/drivers/cpufreq/Kconfig.x86 -+++ b/drivers/cpufreq/Kconfig.x86 -@@ -125,7 +125,7 @@ config X86_POWERNOW_K7_ACPI - - config X86_POWERNOW_K8 - tristate "AMD Opteron/Athlon64 PowerNow!" -- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ -+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE - help - This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. - Support for K10 and newer processors is now in acpi-cpufreq. 
--- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0055-md-disable-bcache.patch b/kernel/patches-4.19.x-rt/0055-md-disable-bcache.patch deleted file mode 100644 index 88247b572..000000000 --- a/kernel/patches-4.19.x-rt/0055-md-disable-bcache.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 274246f0cc33aabdc562929c114eae24541eb9a3 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 29 Aug 2013 11:48:57 +0200 -Subject: [PATCH 055/328] md: disable bcache -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -It uses anon semaphores -|drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’: -|drivers/md/bcache/request.c:1007:2: error: implicit declaration of function ‘up_read_non_owner’ [-Werror=implicit-function-declaration] -| up_read_non_owner(&dc->writeback_lock); -| ^ -|drivers/md/bcache/request.c: In function ‘request_write’: -|drivers/md/bcache/request.c:1033:2: error: implicit declaration of function ‘down_read_non_owner’ [-Werror=implicit-function-declaration] -| down_read_non_owner(&dc->writeback_lock); -| ^ - -either we get rid of those or we have to introduce them… - -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/md/bcache/Kconfig | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig -index f6e0a8b3a61e..18c03d79a442 100644 ---- a/drivers/md/bcache/Kconfig -+++ b/drivers/md/bcache/Kconfig -@@ -1,6 +1,7 @@ - - config BCACHE - tristate "Block device as cache" -+ depends on !PREEMPT_RT_FULL - select CRC64 - help - Allows a block device to be used as cache for other devices; uses --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0056-efi-Disable-runtime-services-on-RT.patch b/kernel/patches-4.19.x-rt/0056-efi-Disable-runtime-services-on-RT.patch deleted file mode 100644 index 5cfb78dbf..000000000 --- a/kernel/patches-4.19.x-rt/0056-efi-Disable-runtime-services-on-RT.patch +++ /dev/null @@ -1,45 +0,0 @@ -From b453602d0f02b94edf714d46e6293b037ea94b67 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 26 Jul 2018 15:03:16 +0200 -Subject: [PATCH 056/328] efi: Disable runtime services on RT - -Based on meassurements the EFI functions get_variable / -get_next_variable take up to 2us which looks okay. -The functions get_time, set_time take around 10ms. Those 10ms are too -much. Even one ms would be too much. -Ard mentioned that SetVariable might even trigger larger latencies if -the firware will erase flash blocks on NOR. - -The time-functions are used by efi-rtc and can be triggered during -runtimed (either via explicit read/write or ntp sync). - -The variable write could be used by pstore. -These functions can be disabled without much of a loss. The poweroff / -reboot hooks may be provided by PSCI. - -Disable EFI's runtime wrappers. - -This was observed on "EFI v2.60 by SoftIron Overdrive 1000". 
- -Acked-by: Ard Biesheuvel -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/firmware/efi/efi.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c -index 5db20908aa9c..1708505fdf5d 100644 ---- a/drivers/firmware/efi/efi.c -+++ b/drivers/firmware/efi/efi.c -@@ -87,7 +87,7 @@ struct mm_struct efi_mm = { - - struct workqueue_struct *efi_rts_wq; - --static bool disable_runtime; -+static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT_BASE); - static int __init setup_noefi(char *arg) - { - disable_runtime = true; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0057-printk-Add-a-printk-kill-switch.patch b/kernel/patches-4.19.x-rt/0057-printk-Add-a-printk-kill-switch.patch deleted file mode 100644 index 264e226bb..000000000 --- a/kernel/patches-4.19.x-rt/0057-printk-Add-a-printk-kill-switch.patch +++ /dev/null @@ -1,173 +0,0 @@ -From dc16c6a47edf44cb0f69ff6124d6a85ee6dd3dcc Mon Sep 17 00:00:00 2001 -From: Ingo Molnar -Date: Fri, 22 Jul 2011 17:58:40 +0200 -Subject: [PATCH 057/328] printk: Add a printk kill switch - -Add a prinkt-kill-switch. This is used from (NMI) watchdog to ensure that -it does not dead-lock with the early printk code. - -Signed-off-by: Thomas Gleixner ---- - include/linux/printk.h | 2 ++ - kernel/printk/printk.c | 79 +++++++++++++++++++++++++++++++----------- - kernel/watchdog_hld.c | 10 ++++++ - 3 files changed, 71 insertions(+), 20 deletions(-) - -diff --git a/include/linux/printk.h b/include/linux/printk.h -index cf3eccfe1543..30ebf5f82a7c 100644 ---- a/include/linux/printk.h -+++ b/include/linux/printk.h -@@ -140,9 +140,11 @@ struct va_format { - #ifdef CONFIG_EARLY_PRINTK - extern asmlinkage __printf(1, 2) - void early_printk(const char *fmt, ...); -+extern void printk_kill(void); - #else - static inline __printf(1, 2) __cold - void early_printk(const char *s, ...) { } -+static inline void printk_kill(void) { } - #endif - - #ifdef CONFIG_PRINTK_NMI -diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 7a2fdc097c8c..29838e532f46 100644 ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -405,6 +405,58 @@ DEFINE_RAW_SPINLOCK(logbuf_lock); - printk_safe_exit_irqrestore(flags); \ - } while (0) - -+#ifdef CONFIG_EARLY_PRINTK -+struct console *early_console; -+ -+static void early_vprintk(const char *fmt, va_list ap) -+{ -+ if (early_console) { -+ char buf[512]; -+ int n = vscnprintf(buf, sizeof(buf), fmt, ap); -+ -+ early_console->write(early_console, buf, n); -+ } -+} -+ -+asmlinkage void early_printk(const char *fmt, ...) -+{ -+ va_list ap; -+ -+ va_start(ap, fmt); -+ early_vprintk(fmt, ap); -+ va_end(ap); -+} -+ -+/* -+ * This is independent of any log levels - a global -+ * kill switch that turns off all of printk. -+ * -+ * Used by the NMI watchdog if early-printk is enabled. 
-+ */ -+static bool __read_mostly printk_killswitch; -+ -+void printk_kill(void) -+{ -+ printk_killswitch = true; -+} -+ -+#ifdef CONFIG_PRINTK -+static int forced_early_printk(const char *fmt, va_list ap) -+{ -+ if (!printk_killswitch) -+ return 0; -+ early_vprintk(fmt, ap); -+ return 1; -+} -+#endif -+ -+#else -+static inline int forced_early_printk(const char *fmt, va_list ap) -+{ -+ return 0; -+} -+#endif -+ - #ifdef CONFIG_PRINTK - DECLARE_WAIT_QUEUE_HEAD(log_wait); - /* the next printk record to read by syslog(READ) or /proc/kmsg */ -@@ -1905,6 +1957,13 @@ asmlinkage int vprintk_emit(int facility, int level, - unsigned long flags; - u64 curr_log_seq; - -+ /* -+ * Fall back to early_printk if a debugging subsystem has -+ * killed printk output -+ */ -+ if (unlikely(forced_early_printk(fmt, args))) -+ return 1; -+ - if (level == LOGLEVEL_SCHED) { - level = LOGLEVEL_DEFAULT; - in_sched = true; -@@ -2049,26 +2108,6 @@ static bool suppress_message_printing(int level) { return false; } - - #endif /* CONFIG_PRINTK */ - --#ifdef CONFIG_EARLY_PRINTK --struct console *early_console; -- --asmlinkage __visible void early_printk(const char *fmt, ...) --{ -- va_list ap; -- char buf[512]; -- int n; -- -- if (!early_console) -- return; -- -- va_start(ap, fmt); -- n = vscnprintf(buf, sizeof(buf), fmt, ap); -- va_end(ap); -- -- early_console->write(early_console, buf, n); --} --#endif -- - static int __add_preferred_console(char *name, int idx, char *options, - char *brl_options) - { -diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c -index 71381168dede..685443375dc0 100644 ---- a/kernel/watchdog_hld.c -+++ b/kernel/watchdog_hld.c -@@ -24,6 +24,8 @@ static DEFINE_PER_CPU(bool, hard_watchdog_warn); - static DEFINE_PER_CPU(bool, watchdog_nmi_touch); - static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); - static DEFINE_PER_CPU(struct perf_event *, dead_event); -+static DEFINE_RAW_SPINLOCK(watchdog_output_lock); -+ - static struct cpumask dead_events_mask; - - static unsigned long hardlockup_allcpu_dumped; -@@ -134,6 +136,13 @@ static void watchdog_overflow_callback(struct perf_event *event, - /* only print hardlockups once */ - if (__this_cpu_read(hard_watchdog_warn) == true) - return; -+ /* -+ * If early-printk is enabled then make sure we do not -+ * lock up in printk() and kill console logging: -+ */ -+ printk_kill(); -+ -+ raw_spin_lock(&watchdog_output_lock); - - pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); - print_modules(); -@@ -151,6 +160,7 @@ static void watchdog_overflow_callback(struct perf_event *event, - !test_and_set_bit(0, &hardlockup_allcpu_dumped)) - trigger_allbutself_cpu_backtrace(); - -+ raw_spin_unlock(&watchdog_output_lock); - if (hardlockup_panic) - nmi_panic(regs, "Hard LOCKUP"); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch b/kernel/patches-4.19.x-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch deleted file mode 100644 index fb083d330..000000000 --- a/kernel/patches-4.19.x-rt/0058-printk-Add-force_early_printk-boot-param-to-help-wit.patch +++ /dev/null @@ -1,38 +0,0 @@ -From bfe8e0affbc2a1d3f23e9aec43198b31d1115f3d Mon Sep 17 00:00:00 2001 -From: Peter Zijlstra -Date: Fri, 2 Sep 2011 14:41:29 +0200 -Subject: [PATCH 058/328] printk: Add "force_early_printk" boot param to help - with debugging - -Gives me an option to screw printk and actually see what the machine -says. 
- -Signed-off-by: Peter Zijlstra -Link: http://lkml.kernel.org/r/1314967289.1301.11.camel@twins -Signed-off-by: Thomas Gleixner -Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org ---- - kernel/printk/printk.c | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 29838e532f46..f934baed564d 100644 ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -435,6 +435,13 @@ asmlinkage void early_printk(const char *fmt, ...) - */ - static bool __read_mostly printk_killswitch; - -+static int __init force_early_printk_setup(char *str) -+{ -+ printk_killswitch = true; -+ return 0; -+} -+early_param("force_early_printk", force_early_printk_setup); -+ - void printk_kill(void) - { - printk_killswitch = true; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch b/kernel/patches-4.19.x-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch deleted file mode 100644 index 668b9aa66..000000000 --- a/kernel/patches-4.19.x-rt/0059-preempt-Provide-preempt_-_-no-rt-variants.patch +++ /dev/null @@ -1,52 +0,0 @@ -From f3b0b7d6dafcddd6bed43f78535a5494b0e09aa8 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 24 Jul 2009 12:38:56 +0200 -Subject: [PATCH 059/328] preempt: Provide preempt_*_(no)rt variants - -RT needs a few preempt_disable/enable points which are not necessary -otherwise. Implement variants to avoid #ifdeffery. - -Signed-off-by: Thomas Gleixner ---- - include/linux/preempt.h | 18 +++++++++++++++++- - 1 file changed, 17 insertions(+), 1 deletion(-) - -diff --git a/include/linux/preempt.h b/include/linux/preempt.h -index 3196d0e76719..f7a17fcc3fec 100644 ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -181,7 +181,11 @@ do { \ - preempt_count_dec(); \ - } while (0) - --#define preempt_enable_no_resched() sched_preempt_enable_no_resched() -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define preempt_enable_no_resched() sched_preempt_enable_no_resched() -+#else -+# define preempt_enable_no_resched() preempt_enable() -+#endif - - #define preemptible() (preempt_count() == 0 && !irqs_disabled()) - -@@ -298,6 +302,18 @@ do { \ - set_preempt_need_resched(); \ - } while (0) - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define preempt_disable_rt() preempt_disable() -+# define preempt_enable_rt() preempt_enable() -+# define preempt_disable_nort() barrier() -+# define preempt_enable_nort() barrier() -+#else -+# define preempt_disable_rt() barrier() -+# define preempt_enable_rt() barrier() -+# define preempt_disable_nort() preempt_disable() -+# define preempt_enable_nort() preempt_enable() -+#endif -+ - #ifdef CONFIG_PREEMPT_NOTIFIERS - - struct preempt_notifier; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch b/kernel/patches-4.19.x-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch deleted file mode 100644 index dbbe56b87..000000000 --- a/kernel/patches-4.19.x-rt/0060-futex-workaround-migrate_disable-enable-in-different.patch +++ /dev/null @@ -1,69 +0,0 @@ -From a2a505f1e5d127a2c3b1ee184d27d0f402dcdf63 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 8 Mar 2017 14:23:35 +0100 -Subject: [PATCH 060/328] futex: workaround migrate_disable/enable in different - context - -migrate_disable()/migrate_enable() takes a different path in atomic() vs -!atomic() context. 
These little hacks ensure that we don't underflow / overflow -the migrate code counts properly while we lock the hb lockwith interrupts -enabled and unlock it with interrupts disabled. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/futex.c | 19 +++++++++++++++++++ - 1 file changed, 19 insertions(+) - -diff --git a/kernel/futex.c b/kernel/futex.c -index e75ad30aa7bc..5c8053098fc8 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -2879,6 +2879,14 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - * before __rt_mutex_start_proxy_lock() is done. - */ - raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); -+ /* -+ * the migrate_disable() here disables migration in the in_atomic() fast -+ * path which is enabled again in the following spin_unlock(). We have -+ * one migrate_disable() pending in the slow-path which is reversed -+ * after the raw_spin_unlock_irq() where we leave the atomic context. -+ */ -+ migrate_disable(); -+ - spin_unlock(q.lock_ptr); - /* - * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter -@@ -2887,6 +2895,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - */ - ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); - raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); -+ migrate_enable(); - - if (ret) { - if (ret == 1) -@@ -3035,11 +3044,21 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) - * rt_waiter. Also see the WARN in wake_futex_pi(). - */ - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); -+ /* -+ * Magic trickery for now to make the RT migrate disable -+ * logic happy. The following spin_unlock() happens with -+ * interrupts disabled so the internal migrate_enable() -+ * won't undo the migrate_disable() which was issued when -+ * locking hb->lock. -+ */ -+ migrate_disable(); - spin_unlock(&hb->lock); - - /* drops pi_state->pi_mutex.wait_lock */ - ret = wake_futex_pi(uaddr, uval, pi_state); - -+ migrate_enable(); -+ - put_pi_state(pi_state); - - /* --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0061-rt-Add-local-irq-locks.patch b/kernel/patches-4.19.x-rt/0061-rt-Add-local-irq-locks.patch deleted file mode 100644 index 7be11bb74..000000000 --- a/kernel/patches-4.19.x-rt/0061-rt-Add-local-irq-locks.patch +++ /dev/null @@ -1,340 +0,0 @@ -From 4db63a0605ac780bf1525c6a90667aef3f897dc1 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Mon, 20 Jun 2011 09:03:47 +0200 -Subject: [PATCH 061/328] rt: Add local irq locks - -Introduce locallock. For !RT this maps to preempt_disable()/ -local_irq_disable() so there is not much that changes. For RT this will -map to a spinlock. This makes preemption possible and locked "ressource" -gets the lockdep anotation it wouldn't have otherwise. The locks are -recursive for owner == current. Also, all locks user migrate_disable() -which ensures that the task is not migrated to another CPU while the lock -is held and the owner is preempted. 
- -Signed-off-by: Thomas Gleixner ---- - include/linux/locallock.h | 271 ++++++++++++++++++++++++++++++++++++++ - include/linux/percpu.h | 29 ++++ - 2 files changed, 300 insertions(+) - create mode 100644 include/linux/locallock.h - -diff --git a/include/linux/locallock.h b/include/linux/locallock.h -new file mode 100644 -index 000000000000..d658c2552601 ---- /dev/null -+++ b/include/linux/locallock.h -@@ -0,0 +1,271 @@ -+#ifndef _LINUX_LOCALLOCK_H -+#define _LINUX_LOCALLOCK_H -+ -+#include -+#include -+ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ -+#ifdef CONFIG_DEBUG_SPINLOCK -+# define LL_WARN(cond) WARN_ON(cond) -+#else -+# define LL_WARN(cond) do { } while (0) -+#endif -+ -+/* -+ * per cpu lock based substitute for local_irq_*() -+ */ -+struct local_irq_lock { -+ spinlock_t lock; -+ struct task_struct *owner; -+ int nestcnt; -+ unsigned long flags; -+}; -+ -+#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ -+ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ -+ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } -+ -+#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ -+ DECLARE_PER_CPU(struct local_irq_lock, lvar) -+ -+#define local_irq_lock_init(lvar) \ -+ do { \ -+ int __cpu; \ -+ for_each_possible_cpu(__cpu) \ -+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ -+ } while (0) -+ -+static inline void __local_lock(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current) { -+ spin_lock(&lv->lock); -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ } -+ lv->nestcnt++; -+} -+ -+#define local_lock(lvar) \ -+ do { __local_lock(&get_local_var(lvar)); } while (0) -+ -+#define local_lock_on(lvar, cpu) \ -+ do { __local_lock(&per_cpu(lvar, cpu)); } while (0) -+ -+static inline int __local_trylock(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current && spin_trylock(&lv->lock)) { -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ lv->nestcnt = 1; -+ return 1; -+ } else if (lv->owner == current) { -+ lv->nestcnt++; -+ return 1; -+ } -+ return 0; -+} -+ -+#define local_trylock(lvar) \ -+ ({ \ -+ int __locked; \ -+ __locked = __local_trylock(&get_local_var(lvar)); \ -+ if (!__locked) \ -+ put_local_var(lvar); \ -+ __locked; \ -+ }) -+ -+static inline void __local_unlock(struct local_irq_lock *lv) -+{ -+ LL_WARN(lv->nestcnt == 0); -+ LL_WARN(lv->owner != current); -+ if (--lv->nestcnt) -+ return; -+ -+ lv->owner = NULL; -+ spin_unlock(&lv->lock); -+} -+ -+#define local_unlock(lvar) \ -+ do { \ -+ __local_unlock(this_cpu_ptr(&lvar)); \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+#define local_unlock_on(lvar, cpu) \ -+ do { __local_unlock(&per_cpu(lvar, cpu)); } while (0) -+ -+static inline void __local_lock_irq(struct local_irq_lock *lv) -+{ -+ spin_lock_irqsave(&lv->lock, lv->flags); -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ lv->nestcnt = 1; -+} -+ -+#define local_lock_irq(lvar) \ -+ do { __local_lock_irq(&get_local_var(lvar)); } while (0) -+ -+#define local_lock_irq_on(lvar, cpu) \ -+ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) -+ -+static inline void __local_unlock_irq(struct local_irq_lock *lv) -+{ -+ LL_WARN(!lv->nestcnt); -+ LL_WARN(lv->owner != current); -+ lv->owner = NULL; -+ lv->nestcnt = 0; -+ spin_unlock_irq(&lv->lock); -+} -+ -+#define local_unlock_irq(lvar) \ -+ do { \ -+ __local_unlock_irq(this_cpu_ptr(&lvar)); \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+#define local_unlock_irq_on(lvar, cpu) \ -+ do { \ -+ __local_unlock_irq(&per_cpu(lvar, cpu)); \ -+ } while (0) -+ -+static inline int __local_lock_irqsave(struct 
local_irq_lock *lv) -+{ -+ if (lv->owner != current) { -+ __local_lock_irq(lv); -+ return 0; -+ } else { -+ lv->nestcnt++; -+ return 1; -+ } -+} -+ -+#define local_lock_irqsave(lvar, _flags) \ -+ do { \ -+ if (__local_lock_irqsave(&get_local_var(lvar))) \ -+ put_local_var(lvar); \ -+ _flags = __this_cpu_read(lvar.flags); \ -+ } while (0) -+ -+#define local_lock_irqsave_on(lvar, _flags, cpu) \ -+ do { \ -+ __local_lock_irqsave(&per_cpu(lvar, cpu)); \ -+ _flags = per_cpu(lvar, cpu).flags; \ -+ } while (0) -+ -+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, -+ unsigned long flags) -+{ -+ LL_WARN(!lv->nestcnt); -+ LL_WARN(lv->owner != current); -+ if (--lv->nestcnt) -+ return 0; -+ -+ lv->owner = NULL; -+ spin_unlock_irqrestore(&lv->lock, lv->flags); -+ return 1; -+} -+ -+#define local_unlock_irqrestore(lvar, flags) \ -+ do { \ -+ if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+#define local_unlock_irqrestore_on(lvar, flags, cpu) \ -+ do { \ -+ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ -+ } while (0) -+ -+#define local_spin_trylock_irq(lvar, lock) \ -+ ({ \ -+ int __locked; \ -+ local_lock_irq(lvar); \ -+ __locked = spin_trylock(lock); \ -+ if (!__locked) \ -+ local_unlock_irq(lvar); \ -+ __locked; \ -+ }) -+ -+#define local_spin_lock_irq(lvar, lock) \ -+ do { \ -+ local_lock_irq(lvar); \ -+ spin_lock(lock); \ -+ } while (0) -+ -+#define local_spin_unlock_irq(lvar, lock) \ -+ do { \ -+ spin_unlock(lock); \ -+ local_unlock_irq(lvar); \ -+ } while (0) -+ -+#define local_spin_lock_irqsave(lvar, lock, flags) \ -+ do { \ -+ local_lock_irqsave(lvar, flags); \ -+ spin_lock(lock); \ -+ } while (0) -+ -+#define local_spin_unlock_irqrestore(lvar, lock, flags) \ -+ do { \ -+ spin_unlock(lock); \ -+ local_unlock_irqrestore(lvar, flags); \ -+ } while (0) -+ -+#define get_locked_var(lvar, var) \ -+ (*({ \ -+ local_lock(lvar); \ -+ this_cpu_ptr(&var); \ -+ })) -+ -+#define put_locked_var(lvar, var) local_unlock(lvar); -+ -+#define local_lock_cpu(lvar) \ -+ ({ \ -+ local_lock(lvar); \ -+ smp_processor_id(); \ -+ }) -+ -+#define local_unlock_cpu(lvar) local_unlock(lvar) -+ -+#else /* PREEMPT_RT_BASE */ -+ -+#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar -+#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar -+ -+static inline void local_irq_lock_init(int lvar) { } -+ -+#define local_trylock(lvar) \ -+ ({ \ -+ preempt_disable(); \ -+ 1; \ -+ }) -+ -+#define local_lock(lvar) preempt_disable() -+#define local_unlock(lvar) preempt_enable() -+#define local_lock_irq(lvar) local_irq_disable() -+#define local_lock_irq_on(lvar, cpu) local_irq_disable() -+#define local_unlock_irq(lvar) local_irq_enable() -+#define local_unlock_irq_on(lvar, cpu) local_irq_enable() -+#define local_lock_irqsave(lvar, flags) local_irq_save(flags) -+#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) -+ -+#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) -+#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) -+#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) -+#define local_spin_lock_irqsave(lvar, lock, flags) \ -+ spin_lock_irqsave(lock, flags) -+#define local_spin_unlock_irqrestore(lvar, lock, flags) \ -+ spin_unlock_irqrestore(lock, flags) -+ -+#define get_locked_var(lvar, var) get_cpu_var(var) -+#define put_locked_var(lvar, var) put_cpu_var(var) -+ -+#define local_lock_cpu(lvar) get_cpu() -+#define local_unlock_cpu(lvar) put_cpu() -+ -+#endif -+ 
-+#endif -diff --git a/include/linux/percpu.h b/include/linux/percpu.h -index 70b7123f38c7..24421bf8c4b3 100644 ---- a/include/linux/percpu.h -+++ b/include/linux/percpu.h -@@ -19,6 +19,35 @@ - #define PERCPU_MODULE_RESERVE 0 - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+#define get_local_var(var) (*({ \ -+ migrate_disable(); \ -+ this_cpu_ptr(&var); })) -+ -+#define put_local_var(var) do { \ -+ (void)&(var); \ -+ migrate_enable(); \ -+} while (0) -+ -+# define get_local_ptr(var) ({ \ -+ migrate_disable(); \ -+ this_cpu_ptr(var); }) -+ -+# define put_local_ptr(var) do { \ -+ (void)(var); \ -+ migrate_enable(); \ -+} while (0) -+ -+#else -+ -+#define get_local_var(var) get_cpu_var(var) -+#define put_local_var(var) put_cpu_var(var) -+#define get_local_ptr(var) get_cpu_ptr(var) -+#define put_local_ptr(var) put_cpu_ptr(var) -+ -+#endif -+ - /* minimum unit size, also is the maximum supported allocation size */ - #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch b/kernel/patches-4.19.x-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch deleted file mode 100644 index e20ba81c2..000000000 --- a/kernel/patches-4.19.x-rt/0062-locallock-provide-get-put-_locked_ptr-variants.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 0e057064ed654b5f7fa22cc5f159ed67eeb332dc Mon Sep 17 00:00:00 2001 -From: Julia Cartwright -Date: Mon, 7 May 2018 08:58:56 -0500 -Subject: [PATCH 062/328] locallock: provide {get,put}_locked_ptr() variants - -Provide a set of locallocked accessors for pointers to per-CPU data; -this is useful for dynamically-allocated per-CPU regions, for example. - -These are symmetric with the {get,put}_cpu_ptr() per-CPU accessor -variants. - -Signed-off-by: Julia Cartwright -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/locallock.h | 10 ++++++++++ - 1 file changed, 10 insertions(+) - -diff --git a/include/linux/locallock.h b/include/linux/locallock.h -index d658c2552601..921eab83cd34 100644 ---- a/include/linux/locallock.h -+++ b/include/linux/locallock.h -@@ -222,6 +222,14 @@ static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, - - #define put_locked_var(lvar, var) local_unlock(lvar); - -+#define get_locked_ptr(lvar, var) \ -+ ({ \ -+ local_lock(lvar); \ -+ this_cpu_ptr(var); \ -+ }) -+ -+#define put_locked_ptr(lvar, var) local_unlock(lvar); -+ - #define local_lock_cpu(lvar) \ - ({ \ - local_lock(lvar); \ -@@ -262,6 +270,8 @@ static inline void local_irq_lock_init(int lvar) { } - - #define get_locked_var(lvar, var) get_cpu_var(var) - #define put_locked_var(lvar, var) put_cpu_var(var) -+#define get_locked_ptr(lvar, var) get_cpu_ptr(var) -+#define put_locked_ptr(lvar, var) put_cpu_ptr(var) - - #define local_lock_cpu(lvar) get_cpu() - #define local_unlock_cpu(lvar) put_cpu() --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch b/kernel/patches-4.19.x-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch deleted file mode 100644 index 3a55eb75b..000000000 --- a/kernel/patches-4.19.x-rt/0063-mm-scatterlist-Do-not-disable-irqs-on-RT.patch +++ /dev/null @@ -1,29 +0,0 @@ -From a132d9a98679bcc505c36c80270ddaa741c15cbc Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 3 Jul 2009 08:44:34 -0500 -Subject: [PATCH 063/328] mm/scatterlist: Do not disable irqs on RT - -For -RT it is enough to keep pagefault disabled (which is currently handled by -kmap_atomic()). 
- -Signed-off-by: Thomas Gleixner ---- - lib/scatterlist.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/lib/scatterlist.c b/lib/scatterlist.c -index 60e7eca2f4be..aad8b9ecd496 100644 ---- a/lib/scatterlist.c -+++ b/lib/scatterlist.c -@@ -777,7 +777,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) - flush_kernel_dcache_page(miter->page); - - if (miter->__flags & SG_MITER_ATOMIC) { -- WARN_ON_ONCE(preemptible()); -+ WARN_ON_ONCE(!pagefault_disabled()); - kunmap_atomic(miter->addr); - } else - kunmap(miter->page); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch b/kernel/patches-4.19.x-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch deleted file mode 100644 index e3aa226f9..000000000 --- a/kernel/patches-4.19.x-rt/0064-signal-x86-Delay-calling-signals-in-atomic.patch +++ /dev/null @@ -1,153 +0,0 @@ -From f95acea987d23816f8094d7db13ae2afb94136ce Mon Sep 17 00:00:00 2001 -From: Oleg Nesterov -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 064/328] signal/x86: Delay calling signals in atomic - -On x86_64 we must disable preemption before we enable interrupts -for stack faults, int3 and debugging, because the current task is using -a per CPU debug stack defined by the IST. If we schedule out, another task -can come in and use the same stack and cause the stack to be corrupted -and crash the kernel on return. - -When CONFIG_PREEMPT_RT_FULL is enabled, spin_locks become mutexes, and -one of these is the spin lock used in signal handling. - -Some of the debug code (int3) causes do_trap() to send a signal. -This function calls a spin lock that has been converted to a mutex -and has the possibility to sleep. If this happens, the above issues with -the corrupted stack is possible. - -Instead of calling the signal right away, for PREEMPT_RT and x86_64, -the signal information is stored on the stacks task_struct and -TIF_NOTIFY_RESUME is set. Then on exit of the trap, the signal resume -code will send the signal when preemption is enabled. - -[ rostedt: Switched from #ifdef CONFIG_PREEMPT_RT_FULL to - ARCH_RT_DELAYS_SIGNAL_SEND and added comments to the code. 
] - - -Signed-off-by: Oleg Nesterov -Signed-off-by: Steven Rostedt -Signed-off-by: Thomas Gleixner ---- - arch/x86/entry/common.c | 7 +++++++ - arch/x86/include/asm/signal.h | 13 ++++++++++++ - include/linux/sched.h | 4 ++++ - kernel/signal.c | 37 +++++++++++++++++++++++++++++++++-- - 4 files changed, 59 insertions(+), 2 deletions(-) - -diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c -index 8353348ddeaf..91676b0d2d4c 100644 ---- a/arch/x86/entry/common.c -+++ b/arch/x86/entry/common.c -@@ -152,6 +152,13 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) - if (cached_flags & _TIF_NEED_RESCHED) - schedule(); - -+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND -+ if (unlikely(current->forced_info.si_signo)) { -+ struct task_struct *t = current; -+ force_sig_info(t->forced_info.si_signo, &t->forced_info, t); -+ t->forced_info.si_signo = 0; -+ } -+#endif - if (cached_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); - -diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h -index 33d3c88a7225..fb0438d06ca7 100644 ---- a/arch/x86/include/asm/signal.h -+++ b/arch/x86/include/asm/signal.h -@@ -28,6 +28,19 @@ typedef struct { - #define SA_IA32_ABI 0x02000000u - #define SA_X32_ABI 0x01000000u - -+/* -+ * Because some traps use the IST stack, we must keep preemption -+ * disabled while calling do_trap(), but do_trap() may call -+ * force_sig_info() which will grab the signal spin_locks for the -+ * task, which in PREEMPT_RT_FULL are mutexes. By defining -+ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set -+ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the -+ * trap. -+ */ -+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) -+#define ARCH_RT_DELAYS_SIGNAL_SEND -+#endif -+ - #ifndef CONFIG_COMPAT - typedef sigset_t compat_sigset_t; - #endif -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 0489d3e0e78c..e4af260f81c5 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -881,6 +881,10 @@ struct task_struct { - /* Restored if set_restore_sigmask() was used: */ - sigset_t saved_sigmask; - struct sigpending pending; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* TODO: move me into ->restart_block ? */ -+ struct siginfo forced_info; -+#endif - unsigned long sas_ss_sp; - size_t sas_ss_size; - unsigned int sas_ss_flags; -diff --git a/kernel/signal.c b/kernel/signal.c -index 5e278f1540ad..d5e764bb2444 100644 ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -1277,8 +1277,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, - * We don't want to have recursive SIGSEGV's etc, for example, - * that is why we also clear SIGNAL_UNKILLABLE. - */ --int --force_sig_info(int sig, struct siginfo *info, struct task_struct *t) -+static int -+do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) - { - unsigned long int flags; - int ret, blocked, ignored; -@@ -1307,6 +1307,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) - return ret; - } - -+int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) -+{ -+/* -+ * On some archs, PREEMPT_RT has to delay sending a signal from a trap -+ * since it can not enable preemption, and the signal code's spin_locks -+ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will -+ * send the signal on exit of the trap. 
-+ */ -+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND -+ if (in_atomic()) { -+ if (WARN_ON_ONCE(t != current)) -+ return 0; -+ if (WARN_ON_ONCE(t->forced_info.si_signo)) -+ return 0; -+ -+ if (is_si_special(info)) { -+ WARN_ON_ONCE(info != SEND_SIG_PRIV); -+ t->forced_info.si_signo = sig; -+ t->forced_info.si_errno = 0; -+ t->forced_info.si_code = SI_KERNEL; -+ t->forced_info.si_pid = 0; -+ t->forced_info.si_uid = 0; -+ } else { -+ t->forced_info = *info; -+ } -+ -+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); -+ return 0; -+ } -+#endif -+ return do_force_sig_info(sig, info, t); -+} -+ - /* - * Nuke all other threads in the group. - */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch b/kernel/patches-4.19.x-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch deleted file mode 100644 index 264b0e6dc..000000000 --- a/kernel/patches-4.19.x-rt/0065-x86-signal-delay-calling-signals-on-32bit.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 44575d6c7e6fb548a6bf67f427d151301cd1dfd8 Mon Sep 17 00:00:00 2001 -From: Yang Shi -Date: Thu, 10 Dec 2015 10:58:51 -0800 -Subject: [PATCH 065/328] x86/signal: delay calling signals on 32bit - -When running some ptrace single step tests on x86-32 machine, the below problem -is triggered: - -BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:917 -in_atomic(): 1, irqs_disabled(): 0, pid: 1041, name: dummy2 -Preemption disabled at:[] do_debug+0x1f/0x1a0 - -CPU: 10 PID: 1041 Comm: dummy2 Tainted: G W 4.1.13-rt13 #1 -Call Trace: - [] dump_stack+0x46/0x5c - [] ___might_sleep+0x137/0x220 - [] rt_spin_lock+0x1f/0x80 - [] do_force_sig_info+0x2a/0xc0 - [] force_sig_info+0xd/0x10 - [] send_sigtrap+0x6f/0x80 - [] do_debug+0x161/0x1a0 - [] debug_stack_correct+0x2e/0x35 - -This happens since 959274753857 ("x86, traps: Track entry into and exit -from IST context") which was merged in v4.1-rc1. - -Signed-off-by: Yang Shi -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/include/asm/signal.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h -index fb0438d06ca7..c00e27af2205 100644 ---- a/arch/x86/include/asm/signal.h -+++ b/arch/x86/include/asm/signal.h -@@ -37,7 +37,7 @@ typedef struct { - * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the - * trap. - */ --#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) -+#if defined(CONFIG_PREEMPT_RT_FULL) - #define ARCH_RT_DELAYS_SIGNAL_SEND - #endif - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch b/kernel/patches-4.19.x-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch deleted file mode 100644 index 9525d4317..000000000 --- a/kernel/patches-4.19.x-rt/0066-buffer_head-Replace-bh_uptodate_lock-for-rt.patch +++ /dev/null @@ -1,196 +0,0 @@ -From 6b9121d4d6cf25eabc1b638027345308486f88b1 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 18 Mar 2011 09:18:52 +0100 -Subject: [PATCH 066/328] buffer_head: Replace bh_uptodate_lock for -rt - -Wrap the bit_spin_lock calls into a separate inline and add the RT -replacements with a real spinlock. 
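
In caller terms the conversion amounts to the pattern below, condensed from the end_buffer_async_* hunks that follow (surrounding code omitted):

    /* before: IRQs off across the buffer walk, bit spinlock in b_state */
    local_irq_save(flags);
    bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
    /* ... walk the page's buffers ... */
    bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
    local_irq_restore(flags);

    /* after: one helper pair; a real spinlock_t on PREEMPT_RT_BASE */
    flags = bh_uptodate_lock_irqsave(first);
    /* ... walk the page's buffers ... */
    bh_uptodate_unlock_irqrestore(first, flags);
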
- -Signed-off-by: Thomas Gleixner ---- - fs/buffer.c | 21 +++++++-------------- - fs/ext4/page-io.c | 6 ++---- - fs/ntfs/aops.c | 10 +++------- - include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++ - 4 files changed, 46 insertions(+), 25 deletions(-) - -diff --git a/fs/buffer.c b/fs/buffer.c -index a550e0d8e965..a5b3a456dbff 100644 ---- a/fs/buffer.c -+++ b/fs/buffer.c -@@ -274,8 +274,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) - * decide that the page is now completely done. - */ - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - clear_buffer_async_read(bh); - unlock_buffer(bh); - tmp = bh; -@@ -288,8 +287,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) - } - tmp = tmp->b_this_page; - } while (tmp != bh); -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - - /* - * If none of the buffers had errors and they are all -@@ -301,9 +299,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) - return; - - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - - /* -@@ -330,8 +326,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) - } - - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - - clear_buffer_async_write(bh); - unlock_buffer(bh); -@@ -343,15 +338,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) - } - tmp = tmp->b_this_page; - } -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - end_page_writeback(page); - return; - - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - EXPORT_SYMBOL(end_buffer_async_write); - -@@ -3368,6 +3360,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) - struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); - if (ret) { - INIT_LIST_HEAD(&ret->b_assoc_buffers); -+ buffer_head_init_locks(ret); - preempt_disable(); - __this_cpu_inc(bh_accounting.nr); - recalc_bh_state(); -diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c -index 9cc79b7b0df1..3f4ba2011499 100644 ---- a/fs/ext4/page-io.c -+++ b/fs/ext4/page-io.c -@@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *bio) - * We check all buffers in the page under BH_Uptodate_Lock - * to avoid races with other end io clearing async_write flags - */ -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &head->b_state); -+ flags = bh_uptodate_lock_irqsave(head); - do { - if (bh_offset(bh) < bio_start || - bh_offset(bh) + bh->b_size > bio_end) { -@@ -108,8 +107,7 @@ static void ext4_finish_bio(struct bio *bio) - if (bio->bi_status) - buffer_io_error(bh); - } while ((bh = bh->b_this_page) != head); -- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(head, flags); - if (!under_io) { - #ifdef CONFIG_EXT4_FS_ENCRYPTION - if (data_page) -diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c -index 8946130c87ad..71d0b3ba70f8 100644 ---- a/fs/ntfs/aops.c -+++ b/fs/ntfs/aops.c -@@ -106,8 +106,7 @@ static void 
ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) - "0x%llx.", (unsigned long long)bh->b_blocknr); - } - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - clear_buffer_async_read(bh); - unlock_buffer(bh); - tmp = bh; -@@ -122,8 +121,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) - } - tmp = tmp->b_this_page; - } while (tmp != bh); -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - /* - * If none of the buffers had errors then we can set the page uptodate, - * but we first have to perform the post read mst fixups, if the -@@ -156,9 +154,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) - unlock_page(page); - return; - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - - /** -diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h -index 96225a77c112..8a1bcfb145d7 100644 ---- a/include/linux/buffer_head.h -+++ b/include/linux/buffer_head.h -@@ -76,8 +76,42 @@ struct buffer_head { - struct address_space *b_assoc_map; /* mapping this buffer is - associated with */ - atomic_t b_count; /* users using this buffer_head */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ spinlock_t b_uptodate_lock; -+#endif - }; - -+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) -+{ -+ unsigned long flags; -+ -+#ifndef CONFIG_PREEMPT_RT_BASE -+ local_irq_save(flags); -+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); -+#else -+ spin_lock_irqsave(&bh->b_uptodate_lock, flags); -+#endif -+ return flags; -+} -+ -+static inline void -+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) -+{ -+#ifndef CONFIG_PREEMPT_RT_BASE -+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); -+ local_irq_restore(flags); -+#else -+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); -+#endif -+} -+ -+static inline void buffer_head_init_locks(struct buffer_head *bh) -+{ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ spin_lock_init(&bh->b_uptodate_lock); -+#endif -+} -+ - /* - * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() - * and buffer_foo() functions. --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch b/kernel/patches-4.19.x-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch deleted file mode 100644 index 40dd8b43d..000000000 --- a/kernel/patches-4.19.x-rt/0067-fs-jbd-jbd2-Make-state-lock-and-journal-head-lock-rt.patch +++ /dev/null @@ -1,109 +0,0 @@ -From 72ca6594764d9a6523352dc609644bea68a3a74b Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 18 Mar 2011 10:11:25 +0100 -Subject: [PATCH 067/328] fs: jbd/jbd2: Make state lock and journal head lock - rt safe - -bit_spin_locks break under RT. 
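
For callers the helpers keep their existing names; only the backing lock changes. A minimal sketch of the calling pattern (illustrative, not a hunk from this patch):

    struct journal_head *jh = NULL;

    jbd_lock_bh_state(bh);          /* RT: spin_lock(&bh->b_state_lock)   */
    if (buffer_jbd(bh))
    	jh = bh2jh(bh);         /* journalling state is stable here   */
    jbd_unlock_bh_state(bh);        /* RT: spin_unlock(&bh->b_state_lock) */
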
- -Based on a previous patch from Steven Rostedt -Signed-off-by: Thomas Gleixner --- - include/linux/buffer_head.h | 8 ++++++++ - include/linux/jbd2.h | 24 ++++++++++++++++++++++++ - 2 files changed, 32 insertions(+) ---- - include/linux/buffer_head.h | 8 ++++++++ - include/linux/jbd2.h | 24 ++++++++++++++++++++++++ - 2 files changed, 32 insertions(+) - -diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h -index 8a1bcfb145d7..5869330d1f38 100644 ---- a/include/linux/buffer_head.h -+++ b/include/linux/buffer_head.h -@@ -78,6 +78,10 @@ struct buffer_head { - atomic_t b_count; /* users using this buffer_head */ - #ifdef CONFIG_PREEMPT_RT_BASE - spinlock_t b_uptodate_lock; -+#if IS_ENABLED(CONFIG_JBD2) -+ spinlock_t b_state_lock; -+ spinlock_t b_journal_head_lock; -+#endif - #endif - }; - -@@ -109,6 +113,10 @@ static inline void buffer_head_init_locks(struct buffer_head *bh) - { - #ifdef CONFIG_PREEMPT_RT_BASE - spin_lock_init(&bh->b_uptodate_lock); -+#if IS_ENABLED(CONFIG_JBD2) -+ spin_lock_init(&bh->b_state_lock); -+ spin_lock_init(&bh->b_journal_head_lock); -+#endif - #endif - } - -diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h -index 268f3000d1b3..8f5d6ecb802e 100644 ---- a/include/linux/jbd2.h -+++ b/include/linux/jbd2.h -@@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) - - static inline void jbd_lock_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(BH_State, &bh->b_state); -+#else -+ spin_lock(&bh->b_state_lock); -+#endif - } - - static inline int jbd_trylock_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - return bit_spin_trylock(BH_State, &bh->b_state); -+#else -+ return spin_trylock(&bh->b_state_lock); -+#endif - } - - static inline int jbd_is_locked_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - return bit_spin_is_locked(BH_State, &bh->b_state); -+#else -+ return spin_is_locked(&bh->b_state_lock); -+#endif - } - - static inline void jbd_unlock_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_unlock(BH_State, &bh->b_state); -+#else -+ spin_unlock(&bh->b_state_lock); -+#endif - } - - static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(BH_JournalHead, &bh->b_state); -+#else -+ spin_lock(&bh->b_journal_head_lock); -+#endif - } - - static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_unlock(BH_JournalHead, &bh->b_state); -+#else -+ spin_unlock(&bh->b_journal_head_lock); -+#endif - } - - #define J_ASSERT(assert) BUG_ON(!(assert)) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch b/kernel/patches-4.19.x-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch deleted file mode 100644 index 4266c50ce..000000000 --- a/kernel/patches-4.19.x-rt/0068-list_bl-Make-list-head-locking-RT-safe.patch +++ /dev/null @@ -1,120 +0,0 @@ -From eb37c3d2df6895d5c86504fdb1a509d075414f52 Mon Sep 17 00:00:00 2001 -From: Paul Gortmaker -Date: Fri, 21 Jun 2013 15:07:25 -0400 -Subject: [PATCH 068/328] list_bl: Make list head locking RT safe - -As per changes in include/linux/jbd_common.h for avoiding the -bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal -head lock rt safe") we do the same thing here. 
- -We use the non atomic __set_bit and __clear_bit inside the scope of -the lock to preserve the ability of the existing LIST_DEBUG code to -use the zero'th bit in the sanity checks. - -As a bit spinlock, we had no lockdep visibility into the usage -of the list head locking. Now, if we were to implement it as a -standard non-raw spinlock, we would see: - -BUG: sleeping function called from invalid context at kernel/rtmutex.c:658 -in_atomic(): 1, irqs_disabled(): 0, pid: 122, name: udevd -5 locks held by udevd/122: - #0: (&sb->s_type->i_mutex_key#7/1){+.+.+.}, at: [] lock_rename+0xe8/0xf0 - #1: (rename_lock){+.+...}, at: [] d_move+0x2c/0x60 - #2: (&dentry->d_lock){+.+...}, at: [] dentry_lock_for_move+0xf3/0x130 - #3: (&dentry->d_lock/2){+.+...}, at: [] dentry_lock_for_move+0xc4/0x130 - #4: (&dentry->d_lock/3){+.+...}, at: [] dentry_lock_for_move+0xd7/0x130 -Pid: 122, comm: udevd Not tainted 3.4.47-rt62 #7 -Call Trace: - [] __might_sleep+0x134/0x1f0 - [] rt_spin_lock+0x24/0x60 - [] __d_shrink+0x5c/0xa0 - [] __d_drop+0x1d/0x40 - [] __d_move+0x8e/0x320 - [] d_move+0x3e/0x60 - [] vfs_rename+0x198/0x4c0 - [] sys_renameat+0x213/0x240 - [] ? _raw_spin_unlock+0x35/0x60 - [] ? do_page_fault+0x1ec/0x4b0 - [] ? retint_swapgs+0xe/0x13 - [] ? trace_hardirqs_on_thunk+0x3a/0x3f - [] sys_rename+0x1b/0x20 - [] system_call_fastpath+0x1a/0x1f - -Since we are only taking the lock during short lived list operations, -lets assume for now that it being raw won't be a significant latency -concern. - - -Signed-off-by: Paul Gortmaker -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/list_bl.h | 28 ++++++++++++++++++++++++++-- - 1 file changed, 26 insertions(+), 2 deletions(-) - -diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h -index 3fc2cc57ba1b..69b659259bac 100644 ---- a/include/linux/list_bl.h -+++ b/include/linux/list_bl.h -@@ -3,6 +3,7 @@ - #define _LINUX_LIST_BL_H - - #include -+#include - #include - - /* -@@ -33,13 +34,22 @@ - - struct hlist_bl_head { - struct hlist_bl_node *first; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ raw_spinlock_t lock; -+#endif - }; - - struct hlist_bl_node { - struct hlist_bl_node *next, **pprev; - }; --#define INIT_HLIST_BL_HEAD(ptr) \ -- ((ptr)->first = NULL) -+ -+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h) -+{ -+ h->first = NULL; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ raw_spin_lock_init(&h->lock); -+#endif -+} - - static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) - { -@@ -119,12 +129,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n) - - static inline void hlist_bl_lock(struct hlist_bl_head *b) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(0, (unsigned long *)b); -+#else -+ raw_spin_lock(&b->lock); -+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -+ __set_bit(0, (unsigned long *)b); -+#endif -+#endif - } - - static inline void hlist_bl_unlock(struct hlist_bl_head *b) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - __bit_spin_unlock(0, (unsigned long *)b); -+#else -+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -+ __clear_bit(0, (unsigned long *)b); -+#endif -+ raw_spin_unlock(&b->lock); -+#endif - } - - static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch b/kernel/patches-4.19.x-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch deleted file mode 100644 index 541210fc8..000000000 --- a/kernel/patches-4.19.x-rt/0069-list_bl-fixup-bogus-lockdep-warning.patch +++ /dev/null @@ -1,103 +0,0 @@ 
-From a294373c35c31ae762358146f49c3c48f1429526 Mon Sep 17 00:00:00 2001 -From: Josh Cartwright -Date: Thu, 31 Mar 2016 00:04:25 -0500 -Subject: [PATCH 069/328] list_bl: fixup bogus lockdep warning - -At first glance, the use of 'static inline' seems appropriate for -INIT_HLIST_BL_HEAD(). - -However, when a 'static inline' function invocation is inlined by gcc, -all callers share any static local data declared within that inline -function. - -This presents a problem for how lockdep classes are setup. raw_spinlocks, for -example, when CONFIG_DEBUG_SPINLOCK, - - # define raw_spin_lock_init(lock) \ - do { \ - static struct lock_class_key __key; \ - \ - __raw_spin_lock_init((lock), #lock, &__key); \ - } while (0) - -When this macro is expanded into a 'static inline' caller, like -INIT_HLIST_BL_HEAD(): - - static inline INIT_HLIST_BL_HEAD(struct hlist_bl_head *h) - { - h->first = NULL; - raw_spin_lock_init(&h->lock); - } - -...the static local lock_class_key object is made a function static. - -For compilation units which initialize invoke INIT_HLIST_BL_HEAD() more -than once, then, all of the invocations share this same static local -object. - -This can lead to some very confusing lockdep splats (example below). -Solve this problem by forcing the INIT_HLIST_BL_HEAD() to be a macro, -which prevents the lockdep class object sharing. - - ============================================= - [ INFO: possible recursive locking detected ] - 4.4.4-rt11 #4 Not tainted - --------------------------------------------- - kswapd0/59 is trying to acquire lock: - (&h->lock#2){+.+.-.}, at: mb_cache_shrink_scan - - but task is already holding lock: - (&h->lock#2){+.+.-.}, at: mb_cache_shrink_scan - - other info that might help us debug this: - Possible unsafe locking scenario: - - CPU0 - ---- - lock(&h->lock#2); - lock(&h->lock#2); - - *** DEADLOCK *** - - May be due to missing lock nesting notation - - 2 locks held by kswapd0/59: - #0: (shrinker_rwsem){+.+...}, at: rt_down_read_trylock - #1: (&h->lock#2){+.+.-.}, at: mb_cache_shrink_scan - -Reported-by: Luis Claudio R. Goncalves -Tested-by: Luis Claudio R. 
Goncalves -Signed-off-by: Josh Cartwright -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/list_bl.h | 12 +++++++----- - 1 file changed, 7 insertions(+), 5 deletions(-) - -diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h -index 69b659259bac..0b5de7d9ffcf 100644 ---- a/include/linux/list_bl.h -+++ b/include/linux/list_bl.h -@@ -43,13 +43,15 @@ struct hlist_bl_node { - struct hlist_bl_node *next, **pprev; - }; - --static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h) --{ -- h->first = NULL; - #ifdef CONFIG_PREEMPT_RT_BASE -- raw_spin_lock_init(&h->lock); -+#define INIT_HLIST_BL_HEAD(h) \ -+do { \ -+ (h)->first = NULL; \ -+ raw_spin_lock_init(&(h)->lock); \ -+} while (0) -+#else -+#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL - #endif --} - - static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) - { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0070-genirq-Disable-irqpoll-on-rt.patch b/kernel/patches-4.19.x-rt/0070-genirq-Disable-irqpoll-on-rt.patch deleted file mode 100644 index 3345bb3ec..000000000 --- a/kernel/patches-4.19.x-rt/0070-genirq-Disable-irqpoll-on-rt.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 974bfebe6d809861b9a25af561668633ef3168a7 Mon Sep 17 00:00:00 2001 -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:57 -0500 -Subject: [PATCH 070/328] genirq: Disable irqpoll on -rt - -Creates long latencies for no value - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - kernel/irq/spurious.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - -diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c -index d867d6ddafdd..cd12ee86c01e 100644 ---- a/kernel/irq/spurious.c -+++ b/kernel/irq/spurious.c -@@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true"); - - static int __init irqfixup_setup(char *str) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); -+ return 1; -+#endif - irqfixup = 1; - printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); - printk(KERN_WARNING "This may impact system performance.\n"); -@@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644); - - static int __init irqpoll_setup(char *str) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); -+ return 1; -+#endif - irqfixup = 2; - printk(KERN_WARNING "Misrouted IRQ fixup and polling support " - "enabled\n"); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0071-genirq-Force-interrupt-thread-on-RT.patch b/kernel/patches-4.19.x-rt/0071-genirq-Force-interrupt-thread-on-RT.patch deleted file mode 100644 index 67a5776c6..000000000 --- a/kernel/patches-4.19.x-rt/0071-genirq-Force-interrupt-thread-on-RT.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 056e2667682c7afa14f71cb6d97ccf2217c0c08b Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 3 Apr 2011 11:57:29 +0200 -Subject: [PATCH 071/328] genirq: Force interrupt thread on RT - -Force threaded_irqs and optimize the code (force_irqthreads) in regard -to this. 
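
Making force_irqthreads a compile-time constant on RT means checks like the following collapse at build time (sketch only; the function name is hypothetical, not a hunk from this patch):

    static int my_setup_forced_threading(struct irqaction *new)
    {
    	if (!force_irqthreads)  /* constant 'true' on RT: branch is discarded */
    		return 0;

    	/* ... convert the primary handler into a threaded handler ... */
    	return 1;
    }
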
- -Signed-off-by: Thomas Gleixner ---- - include/linux/interrupt.h | 4 ++++ - kernel/irq/manage.c | 2 ++ - 2 files changed, 6 insertions(+) - -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index eeceac3376fc..315f852b4981 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -427,7 +427,11 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, - bool state); - - #ifdef CONFIG_IRQ_FORCED_THREADING -+# ifdef CONFIG_PREEMPT_RT_BASE -+# define force_irqthreads (true) -+# else - extern bool force_irqthreads; -+# endif - #else - #define force_irqthreads (0) - #endif -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 23bcfa71077f..3c26d0708709 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -24,6 +24,7 @@ - #include "internals.h" - - #ifdef CONFIG_IRQ_FORCED_THREADING -+# ifndef CONFIG_PREEMPT_RT_BASE - __read_mostly bool force_irqthreads; - EXPORT_SYMBOL_GPL(force_irqthreads); - -@@ -33,6 +34,7 @@ static int __init setup_forced_irqthreads(char *arg) - return 0; - } - early_param("threadirqs", setup_forced_irqthreads); -+# endif - #endif - - static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/kernel/patches-4.19.x-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch deleted file mode 100644 index a4886a69c..000000000 --- a/kernel/patches-4.19.x-rt/0072-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ /dev/null @@ -1,172 +0,0 @@ -From 4c6015fdf87fb7f0f38ce92c85d5630d79c6ae23 Mon Sep 17 00:00:00 2001 -From: Peter Zijlstra -Date: Mon, 28 May 2018 15:24:20 +0200 -Subject: [PATCH 072/328] Split IRQ-off and zone->lock while freeing pages from - PCP list #1 - -Split the IRQ-off section while accessing the PCP list from zone->lock -while freeing pages. -Introcude isolate_pcp_pages() which separates the pages from the PCP -list onto a temporary list and then free the temporary list via -free_pcppages_bulk(). - -Signed-off-by: Peter Zijlstra -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/page_alloc.c | 82 +++++++++++++++++++++++++++++++------------------ - 1 file changed, 52 insertions(+), 30 deletions(-) - -diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index e5c610d711f3..0cfcd42517a4 100644 ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -1095,7 +1095,7 @@ static inline void prefetch_buddy(struct page *page) - } - - /* -- * Frees a number of pages from the PCP lists -+ * Frees a number of pages which have been collected from the pcp lists. - * Assumes all pages on list are in same zone, and of same order. - * count is the number of pages to free. - * -@@ -1106,14 +1106,41 @@ static inline void prefetch_buddy(struct page *page) - * pinned" detection logic. - */ - static void free_pcppages_bulk(struct zone *zone, int count, -- struct per_cpu_pages *pcp) -+ struct list_head *head) -+{ -+ bool isolated_pageblocks; -+ struct page *page, *tmp; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&zone->lock, flags); -+ isolated_pageblocks = has_isolate_pageblock(zone); -+ -+ /* -+ * Use safe version since after __free_one_page(), -+ * page->lru.next will not point to original list. 
-+ */ -+ list_for_each_entry_safe(page, tmp, head, lru) { -+ int mt = get_pcppage_migratetype(page); -+ /* MIGRATE_ISOLATE page should not go to pcplists */ -+ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); -+ /* Pageblock could have been isolated meanwhile */ -+ if (unlikely(isolated_pageblocks)) -+ mt = get_pageblock_migratetype(page); -+ -+ __free_one_page(page, page_to_pfn(page), zone, 0, mt); -+ trace_mm_page_pcpu_drain(page, 0, mt); -+ } -+ spin_unlock_irqrestore(&zone->lock, flags); -+} -+ -+static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp, -+ struct list_head *dst) -+ - { - int migratetype = 0; - int batch_free = 0; - int prefetch_nr = 0; -- bool isolated_pageblocks; -- struct page *page, *tmp; -- LIST_HEAD(head); -+ struct page *page; - - while (count) { - struct list_head *list; -@@ -1145,7 +1172,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, - if (bulkfree_pcp_prepare(page)) - continue; - -- list_add_tail(&page->lru, &head); -+ list_add_tail(&page->lru, dst); - - /* - * We are going to put the page back to the global -@@ -1160,26 +1187,6 @@ static void free_pcppages_bulk(struct zone *zone, int count, - prefetch_buddy(page); - } while (--count && --batch_free && !list_empty(list)); - } -- -- spin_lock(&zone->lock); -- isolated_pageblocks = has_isolate_pageblock(zone); -- -- /* -- * Use safe version since after __free_one_page(), -- * page->lru.next will not point to original list. -- */ -- list_for_each_entry_safe(page, tmp, &head, lru) { -- int mt = get_pcppage_migratetype(page); -- /* MIGRATE_ISOLATE page should not go to pcplists */ -- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); -- /* Pageblock could have been isolated meanwhile */ -- if (unlikely(isolated_pageblocks)) -- mt = get_pageblock_migratetype(page); -- -- __free_one_page(page, page_to_pfn(page), zone, 0, mt); -- trace_mm_page_pcpu_drain(page, 0, mt); -- } -- spin_unlock(&zone->lock); - } - - static void free_one_page(struct zone *zone, -@@ -2544,13 +2551,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) - { - unsigned long flags; - int to_drain, batch; -+ LIST_HEAD(dst); - - local_irq_save(flags); - batch = READ_ONCE(pcp->batch); - to_drain = min(pcp->count, batch); - if (to_drain > 0) -- free_pcppages_bulk(zone, to_drain, pcp); -+ isolate_pcp_pages(to_drain, pcp, &dst); -+ - local_irq_restore(flags); -+ -+ if (to_drain > 0) -+ free_pcppages_bulk(zone, to_drain, &dst); - } - #endif - -@@ -2566,14 +2578,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) - unsigned long flags; - struct per_cpu_pageset *pset; - struct per_cpu_pages *pcp; -+ LIST_HEAD(dst); -+ int count; - - local_irq_save(flags); - pset = per_cpu_ptr(zone->pageset, cpu); - - pcp = &pset->pcp; -- if (pcp->count) -- free_pcppages_bulk(zone, pcp->count, pcp); -+ count = pcp->count; -+ if (count) -+ isolate_pcp_pages(count, pcp, &dst); -+ - local_irq_restore(flags); -+ -+ if (count) -+ free_pcppages_bulk(zone, count, &dst); - } - - /* -@@ -2795,7 +2814,10 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) - pcp->count++; - if (pcp->count >= pcp->high) { - unsigned long batch = READ_ONCE(pcp->batch); -- free_pcppages_bulk(zone, batch, pcp); -+ LIST_HEAD(dst); -+ -+ isolate_pcp_pages(batch, pcp, &dst); -+ free_pcppages_bulk(zone, batch, &dst); - } - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch 
b/kernel/patches-4.19.x-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch deleted file mode 100644 index 48c39e6d8..000000000 --- a/kernel/patches-4.19.x-rt/0073-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch +++ /dev/null @@ -1,171 +0,0 @@ -From dba8e6d7ab200ab5fe544af8c6093bcb3d215320 Mon Sep 17 00:00:00 2001 -From: Peter Zijlstra -Date: Mon, 28 May 2018 15:24:21 +0200 -Subject: [PATCH 073/328] Split IRQ-off and zone->lock while freeing pages from - PCP list #2 - -Split the IRQ-off section while accessing the PCP list from zone->lock -while freeing pages. -Introcude isolate_pcp_pages() which separates the pages from the PCP -list onto a temporary list and then free the temporary list via -free_pcppages_bulk(). - -Signed-off-by: Peter Zijlstra -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/page_alloc.c | 60 ++++++++++++++++++++++++++++++++++++++++--------- - 1 file changed, 50 insertions(+), 10 deletions(-) - -diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 0cfcd42517a4..9a4d150ea5b7 100644 ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -1105,8 +1105,8 @@ static inline void prefetch_buddy(struct page *page) - * And clear the zone's pages_scanned counter, to hold off the "all pages are - * pinned" detection logic. - */ --static void free_pcppages_bulk(struct zone *zone, int count, -- struct list_head *head) -+static void free_pcppages_bulk(struct zone *zone, struct list_head *head, -+ bool zone_retry) - { - bool isolated_pageblocks; - struct page *page, *tmp; -@@ -1121,12 +1121,27 @@ static void free_pcppages_bulk(struct zone *zone, int count, - */ - list_for_each_entry_safe(page, tmp, head, lru) { - int mt = get_pcppage_migratetype(page); -+ -+ if (page_zone(page) != zone) { -+ /* -+ * free_unref_page_list() sorts pages by zone. If we end -+ * up with pages from a different NUMA nodes belonging -+ * to the same ZONE index then we need to redo with the -+ * correct ZONE pointer. Skip the page for now, redo it -+ * on the next iteration. 
-+ */ -+ WARN_ON_ONCE(zone_retry == false); -+ if (zone_retry) -+ continue; -+ } -+ - /* MIGRATE_ISOLATE page should not go to pcplists */ - VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); - /* Pageblock could have been isolated meanwhile */ - if (unlikely(isolated_pageblocks)) - mt = get_pageblock_migratetype(page); - -+ list_del(&page->lru); - __free_one_page(page, page_to_pfn(page), zone, 0, mt); - trace_mm_page_pcpu_drain(page, 0, mt); - } -@@ -2562,7 +2577,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) - local_irq_restore(flags); - - if (to_drain > 0) -- free_pcppages_bulk(zone, to_drain, &dst); -+ free_pcppages_bulk(zone, &dst, false); - } - #endif - -@@ -2592,7 +2607,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) - local_irq_restore(flags); - - if (count) -- free_pcppages_bulk(zone, count, &dst); -+ free_pcppages_bulk(zone, &dst, false); - } - - /* -@@ -2785,7 +2800,8 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn) - return true; - } - --static void free_unref_page_commit(struct page *page, unsigned long pfn) -+static void free_unref_page_commit(struct page *page, unsigned long pfn, -+ struct list_head *dst) - { - struct zone *zone = page_zone(page); - struct per_cpu_pages *pcp; -@@ -2814,10 +2830,8 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) - pcp->count++; - if (pcp->count >= pcp->high) { - unsigned long batch = READ_ONCE(pcp->batch); -- LIST_HEAD(dst); - -- isolate_pcp_pages(batch, pcp, &dst); -- free_pcppages_bulk(zone, batch, &dst); -+ isolate_pcp_pages(batch, pcp, dst); - } - } - -@@ -2828,13 +2842,17 @@ void free_unref_page(struct page *page) - { - unsigned long flags; - unsigned long pfn = page_to_pfn(page); -+ struct zone *zone = page_zone(page); -+ LIST_HEAD(dst); - - if (!free_unref_page_prepare(page, pfn)) - return; - - local_irq_save(flags); -- free_unref_page_commit(page, pfn); -+ free_unref_page_commit(page, pfn, &dst); - local_irq_restore(flags); -+ if (!list_empty(&dst)) -+ free_pcppages_bulk(zone, &dst, false); - } - - /* -@@ -2845,6 +2863,11 @@ void free_unref_page_list(struct list_head *list) - struct page *page, *next; - unsigned long flags, pfn; - int batch_count = 0; -+ struct list_head dsts[__MAX_NR_ZONES]; -+ int i; -+ -+ for (i = 0; i < __MAX_NR_ZONES; i++) -+ INIT_LIST_HEAD(&dsts[i]); - - /* Prepare pages for freeing */ - list_for_each_entry_safe(page, next, list, lru) { -@@ -2857,10 +2880,12 @@ void free_unref_page_list(struct list_head *list) - local_irq_save(flags); - list_for_each_entry_safe(page, next, list, lru) { - unsigned long pfn = page_private(page); -+ enum zone_type type; - - set_page_private(page, 0); - trace_mm_page_free_batched(page); -- free_unref_page_commit(page, pfn); -+ type = page_zonenum(page); -+ free_unref_page_commit(page, pfn, &dsts[type]); - - /* - * Guard against excessive IRQ disabled times when we get -@@ -2873,6 +2898,21 @@ void free_unref_page_list(struct list_head *list) - } - } - local_irq_restore(flags); -+ -+ for (i = 0; i < __MAX_NR_ZONES; ) { -+ struct page *page; -+ struct zone *zone; -+ -+ if (list_empty(&dsts[i])) { -+ i++; -+ continue; -+ } -+ -+ page = list_first_entry(&dsts[i], struct page, lru); -+ zone = page_zone(page); -+ -+ free_pcppages_bulk(zone, &dsts[i], true); -+ } - } - - /* --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/kernel/patches-4.19.x-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch deleted file mode 100644 index 
39cee1d14..000000000 --- a/kernel/patches-4.19.x-rt/0074-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch +++ /dev/null @@ -1,618 +0,0 @@ -From abbdf6516e6ac19a92a3c08fc7a2f1ecc66c2bc6 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Mon, 28 May 2018 15:24:22 +0200 -Subject: [PATCH 074/328] mm/SLxB: change list_lock to raw_spinlock_t - -The list_lock is used with used with IRQs off on RT. Make it a raw_spinlock_t -otherwise the interrupts won't be disabled on -RT. The locking rules remain -the same on !RT. -This patch changes it for SLAB and SLUB since both share the same header -file for struct kmem_cache_node defintion. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/slab.c | 94 +++++++++++++++++++++++++++---------------------------- - mm/slab.h | 2 +- - mm/slub.c | 50 ++++++++++++++--------------- - 3 files changed, 73 insertions(+), 73 deletions(-) - -diff --git a/mm/slab.c b/mm/slab.c -index 46f21e73db2f..38f6609343b3 100644 ---- a/mm/slab.c -+++ b/mm/slab.c -@@ -233,7 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent) - parent->shared = NULL; - parent->alien = NULL; - parent->colour_next = 0; -- spin_lock_init(&parent->list_lock); -+ raw_spin_lock_init(&parent->list_lock); - parent->free_objects = 0; - parent->free_touched = 0; - } -@@ -600,9 +600,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep, - page_node = page_to_nid(page); - n = get_node(cachep, page_node); - -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - free_block(cachep, &objp, 1, page_node, &list); -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - slabs_destroy(cachep, &list); - } -@@ -731,7 +731,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep, - struct kmem_cache_node *n = get_node(cachep, node); - - if (ac->avail) { -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - /* - * Stuff objects into the remote nodes shared array first. 
- * That way we could avoid the overhead of putting the objects -@@ -742,7 +742,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep, - - free_block(cachep, ac->entry, ac->avail, node, list); - ac->avail = 0; -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - } - } - -@@ -815,9 +815,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp, - slabs_destroy(cachep, &list); - } else { - n = get_node(cachep, page_node); -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - free_block(cachep, &objp, 1, page_node, &list); -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - slabs_destroy(cachep, &list); - } - return 1; -@@ -858,10 +858,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) - */ - n = get_node(cachep, node); - if (n) { -- spin_lock_irq(&n->list_lock); -+ raw_spin_lock_irq(&n->list_lock); - n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + - cachep->num; -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - - return 0; - } -@@ -940,7 +940,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep, - goto fail; - - n = get_node(cachep, node); -- spin_lock_irq(&n->list_lock); -+ raw_spin_lock_irq(&n->list_lock); - if (n->shared && force_change) { - free_block(cachep, n->shared->entry, - n->shared->avail, node, &list); -@@ -958,7 +958,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep, - new_alien = NULL; - } - -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - slabs_destroy(cachep, &list); - - /* -@@ -997,7 +997,7 @@ static void cpuup_canceled(long cpu) - if (!n) - continue; - -- spin_lock_irq(&n->list_lock); -+ raw_spin_lock_irq(&n->list_lock); - - /* Free limit for this kmem_cache_node */ - n->free_limit -= cachep->batchcount; -@@ -1010,7 +1010,7 @@ static void cpuup_canceled(long cpu) - } - - if (!cpumask_empty(mask)) { -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - goto free_slab; - } - -@@ -1024,7 +1024,7 @@ static void cpuup_canceled(long cpu) - alien = n->alien; - n->alien = NULL; - -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - - kfree(shared); - if (alien) { -@@ -1208,7 +1208,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node * - /* - * Do not assume that spinlocks can be initialized via memcpy: - */ -- spin_lock_init(&ptr->list_lock); -+ raw_spin_lock_init(&ptr->list_lock); - - MAKE_ALL_LISTS(cachep, ptr, nodeid); - cachep->node[nodeid] = ptr; -@@ -1379,11 +1379,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) - for_each_kmem_cache_node(cachep, node, n) { - unsigned long total_slabs, free_slabs, free_objs; - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - total_slabs = n->total_slabs; - free_slabs = n->free_slabs; - free_objs = n->free_objects; -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - - pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n", - node, total_slabs - free_slabs, total_slabs, -@@ -2178,7 +2178,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep) - { - #ifdef CONFIG_SMP - check_irq_off(); -- assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); -+ assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); - #endif - } - -@@ -2186,7 +2186,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) - { - #ifdef 
CONFIG_SMP - check_irq_off(); -- assert_spin_locked(&get_node(cachep, node)->list_lock); -+ assert_raw_spin_locked(&get_node(cachep, node)->list_lock); - #endif - } - -@@ -2226,9 +2226,9 @@ static void do_drain(void *arg) - check_irq_off(); - ac = cpu_cache_get(cachep); - n = get_node(cachep, node); -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - free_block(cachep, ac->entry, ac->avail, node, &list); -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - slabs_destroy(cachep, &list); - ac->avail = 0; - } -@@ -2246,9 +2246,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep) - drain_alien_cache(cachep, n->alien); - - for_each_kmem_cache_node(cachep, node, n) { -- spin_lock_irq(&n->list_lock); -+ raw_spin_lock_irq(&n->list_lock); - drain_array_locked(cachep, n->shared, node, true, &list); -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - - slabs_destroy(cachep, &list); - } -@@ -2270,10 +2270,10 @@ static int drain_freelist(struct kmem_cache *cache, - nr_freed = 0; - while (nr_freed < tofree && !list_empty(&n->slabs_free)) { - -- spin_lock_irq(&n->list_lock); -+ raw_spin_lock_irq(&n->list_lock); - p = n->slabs_free.prev; - if (p == &n->slabs_free) { -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - goto out; - } - -@@ -2286,7 +2286,7 @@ static int drain_freelist(struct kmem_cache *cache, - * to the cache. - */ - n->free_objects -= cache->num; -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - slab_destroy(cache, page); - nr_freed++; - } -@@ -2734,7 +2734,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page) - INIT_LIST_HEAD(&page->lru); - n = get_node(cachep, page_to_nid(page)); - -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - n->total_slabs++; - if (!page->active) { - list_add_tail(&page->lru, &(n->slabs_free)); -@@ -2744,7 +2744,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page) - - STATS_INC_GROWN(cachep); - n->free_objects += cachep->num - page->active; -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - fixup_objfreelist_debug(cachep, &list); - } -@@ -2912,7 +2912,7 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc) - { - struct page *page; - -- assert_spin_locked(&n->list_lock); -+ assert_raw_spin_locked(&n->list_lock); - page = list_first_entry_or_null(&n->slabs_partial, struct page, lru); - if (!page) { - n->free_touched = 1; -@@ -2938,10 +2938,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, - if (!gfp_pfmemalloc_allowed(flags)) - return NULL; - -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - page = get_first_slab(n, true); - if (!page) { -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - return NULL; - } - -@@ -2950,7 +2950,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, - - fixup_slab_list(cachep, n, page, &list); - -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - fixup_objfreelist_debug(cachep, &list); - - return obj; -@@ -3009,7 +3009,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) - if (!n->free_objects && (!shared || !shared->avail)) - goto direct_grow; - -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - shared = READ_ONCE(n->shared); - - /* See if we can refill from the shared array */ -@@ -3033,7 +3033,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) - must_grow: - 
n->free_objects -= ac->avail; - alloc_done: -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - fixup_objfreelist_debug(cachep, &list); - - direct_grow: -@@ -3258,7 +3258,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, - BUG_ON(!n); - - check_irq_off(); -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - page = get_first_slab(n, false); - if (!page) - goto must_grow; -@@ -3276,12 +3276,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, - - fixup_slab_list(cachep, n, page, &list); - -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - fixup_objfreelist_debug(cachep, &list); - return obj; - - must_grow: -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); - if (page) { - /* This slab isn't counted yet so don't update free_objects */ -@@ -3457,7 +3457,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) - - check_irq_off(); - n = get_node(cachep, node); -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - if (n->shared) { - struct array_cache *shared_array = n->shared; - int max = shared_array->limit - shared_array->avail; -@@ -3486,7 +3486,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) - STATS_SET_FREEABLE(cachep, i); - } - #endif -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - slabs_destroy(cachep, &list); - ac->avail -= batchcount; - memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); -@@ -3896,9 +3896,9 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, - - node = cpu_to_mem(cpu); - n = get_node(cachep, node); -- spin_lock_irq(&n->list_lock); -+ raw_spin_lock_irq(&n->list_lock); - free_block(cachep, ac->entry, ac->avail, node, &list); -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - slabs_destroy(cachep, &list); - } - free_percpu(prev); -@@ -4023,9 +4023,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, - return; - } - -- spin_lock_irq(&n->list_lock); -+ raw_spin_lock_irq(&n->list_lock); - drain_array_locked(cachep, ac, node, false, &list); -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - - slabs_destroy(cachep, &list); - } -@@ -4109,7 +4109,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) - - for_each_kmem_cache_node(cachep, node, n) { - check_irq_on(); -- spin_lock_irq(&n->list_lock); -+ raw_spin_lock_irq(&n->list_lock); - - total_slabs += n->total_slabs; - free_slabs += n->free_slabs; -@@ -4118,7 +4118,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) - if (n->shared) - shared_avail += n->shared->avail; - -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - } - num_objs = total_slabs * cachep->num; - active_slabs = total_slabs - free_slabs; -@@ -4338,13 +4338,13 @@ static int leaks_show(struct seq_file *m, void *p) - for_each_kmem_cache_node(cachep, node, n) { - - check_irq_on(); -- spin_lock_irq(&n->list_lock); -+ raw_spin_lock_irq(&n->list_lock); - - list_for_each_entry(page, &n->slabs_full, lru) - handle_slab(x, cachep, page); - list_for_each_entry(page, &n->slabs_partial, lru) - handle_slab(x, cachep, page); -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - } - } while (!is_store_user_clean(cachep)); - -diff --git a/mm/slab.h b/mm/slab.h -index 9632772e14be..d6b01d61f768 100644 ---- 
a/mm/slab.h -+++ b/mm/slab.h -@@ -454,7 +454,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, - * The slab lists for all objects. - */ - struct kmem_cache_node { -- spinlock_t list_lock; -+ raw_spinlock_t list_lock; - - #ifdef CONFIG_SLAB - struct list_head slabs_partial; /* partial list first, better asm code */ -diff --git a/mm/slub.c b/mm/slub.c -index 9c3937c5ce38..ba20c68a9cfd 100644 ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -1167,7 +1167,7 @@ static noinline int free_debug_processing( - unsigned long uninitialized_var(flags); - int ret = 0; - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - slab_lock(page); - - if (s->flags & SLAB_CONSISTENCY_CHECKS) { -@@ -1202,7 +1202,7 @@ static noinline int free_debug_processing( - bulk_cnt, cnt); - - slab_unlock(page); -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - if (!ret) - slab_fix(s, "Object at 0x%p not freed", object); - return ret; -@@ -1802,7 +1802,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, - if (!n || !n->nr_partial) - return NULL; - -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - list_for_each_entry_safe(page, page2, &n->partial, lru) { - void *t; - -@@ -1827,7 +1827,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, - break; - - } -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - return object; - } - -@@ -2073,7 +2073,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, - * that acquire_slab() will see a slab page that - * is frozen - */ -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - } - } else { - m = M_FULL; -@@ -2084,7 +2084,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, - * slabs from diagnostic functions will not see - * any frozen slabs. 
- */ -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - } - } - -@@ -2119,7 +2119,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, - goto redo; - - if (lock) -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - if (m == M_FREE) { - stat(s, DEACTIVATE_EMPTY); -@@ -2154,10 +2154,10 @@ static void unfreeze_partials(struct kmem_cache *s, - n2 = get_node(s, page_to_nid(page)); - if (n != n2) { - if (n) -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - n = n2; -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - } - - do { -@@ -2186,7 +2186,7 @@ static void unfreeze_partials(struct kmem_cache *s, - } - - if (n) -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - while (discard_page) { - page = discard_page; -@@ -2355,10 +2355,10 @@ static unsigned long count_partial(struct kmem_cache_node *n, - unsigned long x = 0; - struct page *page; - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - list_for_each_entry(page, &n->partial, lru) - x += get_count(page); -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - return x; - } - #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ -@@ -2793,7 +2793,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, - - do { - if (unlikely(n)) { -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - n = NULL; - } - prior = page->freelist; -@@ -2825,7 +2825,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, - * Otherwise the list_lock will synchronize with - * other processors updating the list of slabs. - */ -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - - } - } -@@ -2867,7 +2867,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, - add_partial(n, page, DEACTIVATE_TO_TAIL); - stat(s, FREE_ADD_PARTIAL); - } -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - return; - - slab_empty: -@@ -2882,7 +2882,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, - remove_full(s, n, page); - } - -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - stat(s, FREE_SLAB); - discard_slab(s, page); - } -@@ -3269,7 +3269,7 @@ static void - init_kmem_cache_node(struct kmem_cache_node *n) - { - n->nr_partial = 0; -- spin_lock_init(&n->list_lock); -+ raw_spin_lock_init(&n->list_lock); - INIT_LIST_HEAD(&n->partial); - #ifdef CONFIG_SLUB_DEBUG - atomic_long_set(&n->nr_slabs, 0); -@@ -3656,7 +3656,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) - struct page *page, *h; - - BUG_ON(irqs_disabled()); -- spin_lock_irq(&n->list_lock); -+ raw_spin_lock_irq(&n->list_lock); - list_for_each_entry_safe(page, h, &n->partial, lru) { - if (!page->inuse) { - remove_partial(n, page); -@@ -3666,7 +3666,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) - "Objects remaining in %s on __kmem_cache_shutdown()"); - } - } -- spin_unlock_irq(&n->list_lock); -+ raw_spin_unlock_irq(&n->list_lock); - - list_for_each_entry_safe(page, h, &discard, lru) - discard_slab(s, page); -@@ -3939,7 +3939,7 @@ int __kmem_cache_shrink(struct kmem_cache *s) - for (i = 0; i < SHRINK_PROMOTE_MAX; i++) - INIT_LIST_HEAD(promote + i); - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - 
- /* - * Build lists of slabs to discard or promote. -@@ -3970,7 +3970,7 @@ int __kmem_cache_shrink(struct kmem_cache *s) - for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) - list_splice(promote + i, &n->partial); - -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - - /* Release empty slabs */ - list_for_each_entry_safe(page, t, &discard, lru) -@@ -4384,7 +4384,7 @@ static int validate_slab_node(struct kmem_cache *s, - struct page *page; - unsigned long flags; - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - - list_for_each_entry(page, &n->partial, lru) { - validate_slab_slab(s, page, map); -@@ -4406,7 +4406,7 @@ static int validate_slab_node(struct kmem_cache *s, - s->name, count, atomic_long_read(&n->nr_slabs)); - - out: -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - return count; - } - -@@ -4596,12 +4596,12 @@ static int list_locations(struct kmem_cache *s, char *buf, - if (!atomic_long_read(&n->nr_slabs)) - continue; - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - list_for_each_entry(page, &n->partial, lru) - process_slab(&t, s, page, alloc, map); - list_for_each_entry(page, &n->full, lru) - process_slab(&t, s, page, alloc, map); -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - } - - for (i = 0; i < t.count; i++) { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/kernel/patches-4.19.x-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch deleted file mode 100644 index 2b64f2bc7..000000000 --- a/kernel/patches-4.19.x-rt/0075-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch +++ /dev/null @@ -1,222 +0,0 @@ -From b3c42996e1092269d6fac0652b22140250f11b7b Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 21 Jun 2018 17:29:19 +0200 -Subject: [PATCH 075/328] mm/SLUB: delay giving back empty slubs to IRQ enabled - regions - -__free_slab() is invoked with disabled interrupts which increases the -irq-off time while __free_pages() is doing the work. -Allow __free_slab() to be invoked with enabled interrupts and move -everything from interrupts-off invocations to a temporary per-CPU list -so it can be processed later. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/slub.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++---- - 1 file changed, 69 insertions(+), 5 deletions(-) - -diff --git a/mm/slub.c b/mm/slub.c -index ba20c68a9cfd..224663e20772 100644 ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -1330,6 +1330,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, - - #endif /* CONFIG_SLUB_DEBUG */ - -+struct slub_free_list { -+ raw_spinlock_t lock; -+ struct list_head list; -+}; -+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); -+ - /* - * Hooks for other subsystems that check memory allocations. In a typical - * production configuration these hooks all should produce no code at all. 
-@@ -1684,6 +1690,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page) - __free_pages(page, order); - } - -+static void free_delayed(struct list_head *h) -+{ -+ while (!list_empty(h)) { -+ struct page *page = list_first_entry(h, struct page, lru); -+ -+ list_del(&page->lru); -+ __free_slab(page->slab_cache, page); -+ } -+} -+ - static void rcu_free_slab(struct rcu_head *h) - { - struct page *page = container_of(h, struct page, rcu_head); -@@ -1695,6 +1711,12 @@ static void free_slab(struct kmem_cache *s, struct page *page) - { - if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { - call_rcu(&page->rcu_head, rcu_free_slab); -+ } else if (irqs_disabled()) { -+ struct slub_free_list *f = this_cpu_ptr(&slub_free_list); -+ -+ raw_spin_lock(&f->lock); -+ list_add(&page->lru, &f->list); -+ raw_spin_unlock(&f->lock); - } else - __free_slab(s, page); - } -@@ -2223,14 +2245,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) - pobjects = oldpage->pobjects; - pages = oldpage->pages; - if (drain && pobjects > s->cpu_partial) { -+ struct slub_free_list *f; - unsigned long flags; -+ LIST_HEAD(tofree); - /* - * partial array is full. Move the existing - * set to the per node partial list. - */ - local_irq_save(flags); - unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); -+ f = this_cpu_ptr(&slub_free_list); -+ raw_spin_lock(&f->lock); -+ list_splice_init(&f->list, &tofree); -+ raw_spin_unlock(&f->lock); - local_irq_restore(flags); -+ free_delayed(&tofree); - oldpage = NULL; - pobjects = 0; - pages = 0; -@@ -2300,7 +2329,22 @@ static bool has_cpu_slab(int cpu, void *info) - - static void flush_all(struct kmem_cache *s) - { -+ LIST_HEAD(tofree); -+ int cpu; -+ - on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); -+ for_each_online_cpu(cpu) { -+ struct slub_free_list *f; -+ -+ if (!has_cpu_slab(cpu, s)) -+ continue; -+ -+ f = &per_cpu(slub_free_list, cpu); -+ raw_spin_lock_irq(&f->lock); -+ list_splice_init(&f->list, &tofree); -+ raw_spin_unlock_irq(&f->lock); -+ free_delayed(&tofree); -+ } - } - - /* -@@ -2498,8 +2542,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) - * already disabled (which is the case for bulk allocation). - */ - static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, -- unsigned long addr, struct kmem_cache_cpu *c) -+ unsigned long addr, struct kmem_cache_cpu *c, -+ struct list_head *to_free) - { -+ struct slub_free_list *f; - void *freelist; - struct page *page; - -@@ -2555,6 +2601,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, - VM_BUG_ON(!c->page->frozen); - c->freelist = get_freepointer(s, freelist); - c->tid = next_tid(c->tid); -+ -+out: -+ f = this_cpu_ptr(&slub_free_list); -+ raw_spin_lock(&f->lock); -+ list_splice_init(&f->list, to_free); -+ raw_spin_unlock(&f->lock); -+ - return freelist; - - new_slab: -@@ -2570,7 +2623,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, - - if (unlikely(!freelist)) { - slab_out_of_memory(s, gfpflags, node); -- return NULL; -+ goto out; - } - - page = c->page; -@@ -2583,7 +2636,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, - goto new_slab; /* Slab failed checks. 
Next slab needed */ - - deactivate_slab(s, page, get_freepointer(s, freelist), c); -- return freelist; -+ goto out; - } - - /* -@@ -2595,6 +2648,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, - { - void *p; - unsigned long flags; -+ LIST_HEAD(tofree); - - local_irq_save(flags); - #ifdef CONFIG_PREEMPT -@@ -2606,8 +2660,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, - c = this_cpu_ptr(s->cpu_slab); - #endif - -- p = ___slab_alloc(s, gfpflags, node, addr, c); -+ p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree); - local_irq_restore(flags); -+ free_delayed(&tofree); - return p; - } - -@@ -3085,6 +3140,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, - void **p) - { - struct kmem_cache_cpu *c; -+ LIST_HEAD(to_free); - int i; - - /* memcg and kmem_cache debug support */ -@@ -3108,7 +3164,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, - * of re-populating per CPU c->freelist - */ - p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, -- _RET_IP_, c); -+ _RET_IP_, c, &to_free); - if (unlikely(!p[i])) - goto error; - -@@ -3120,6 +3176,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, - } - c->tid = next_tid(c->tid); - local_irq_enable(); -+ free_delayed(&to_free); - - /* Clear memory outside IRQ disabled fastpath loop */ - if (unlikely(flags & __GFP_ZERO)) { -@@ -3134,6 +3191,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, - return i; - error: - local_irq_enable(); -+ free_delayed(&to_free); - slab_post_alloc_hook(s, flags, i, p); - __kmem_cache_free_bulk(s, i, p); - return 0; -@@ -4183,6 +4241,12 @@ void __init kmem_cache_init(void) - { - static __initdata struct kmem_cache boot_kmem_cache, - boot_kmem_cache_node; -+ int cpu; -+ -+ for_each_possible_cpu(cpu) { -+ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); -+ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); -+ } - - if (debug_guardpage_minorder()) - slub_max_order = 0; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch b/kernel/patches-4.19.x-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch deleted file mode 100644 index 0aca78d05..000000000 --- a/kernel/patches-4.19.x-rt/0076-mm-page_alloc-rt-friendly-per-cpu-pages.patch +++ /dev/null @@ -1,238 +0,0 @@ -From d3dec69695332f82af11a39b0fd327ad173c4715 Mon Sep 17 00:00:00 2001 -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:37 -0500 -Subject: [PATCH 076/328] mm: page_alloc: rt-friendly per-cpu pages - -rt-friendly per-cpu pages: convert the irqs-off per-cpu locking -method into a preemptible, explicit-per-cpu-locks method. 
- -Contains fixes from: - Peter Zijlstra - Thomas Gleixner - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - mm/page_alloc.c | 63 +++++++++++++++++++++++++++++++++---------------- - 1 file changed, 43 insertions(+), 20 deletions(-) - -diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index 9a4d150ea5b7..d6f9be9c6635 100644 ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -60,6 +60,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -291,6 +292,18 @@ EXPORT_SYMBOL(nr_node_ids); - EXPORT_SYMBOL(nr_online_nodes); - #endif - -+static DEFINE_LOCAL_IRQ_LOCK(pa_lock); -+ -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define cpu_lock_irqsave(cpu, flags) \ -+ local_lock_irqsave_on(pa_lock, flags, cpu) -+# define cpu_unlock_irqrestore(cpu, flags) \ -+ local_unlock_irqrestore_on(pa_lock, flags, cpu) -+#else -+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) -+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) -+#endif -+ - int page_group_by_mobility_disabled __read_mostly; - - #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT -@@ -1296,10 +1309,10 @@ static void __free_pages_ok(struct page *page, unsigned int order) - return; - - migratetype = get_pfnblock_migratetype(page, pfn); -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - __count_vm_events(PGFREE, 1 << order); - free_one_page(page_zone(page), page, pfn, order, migratetype); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - } - - static void __init __free_pages_boot_core(struct page *page, unsigned int order) -@@ -2568,13 +2581,13 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) - int to_drain, batch; - LIST_HEAD(dst); - -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - batch = READ_ONCE(pcp->batch); - to_drain = min(pcp->count, batch); - if (to_drain > 0) - isolate_pcp_pages(to_drain, pcp, &dst); - -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - - if (to_drain > 0) - free_pcppages_bulk(zone, &dst, false); -@@ -2596,7 +2609,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) - LIST_HEAD(dst); - int count; - -- local_irq_save(flags); -+ cpu_lock_irqsave(cpu, flags); - pset = per_cpu_ptr(zone->pageset, cpu); - - pcp = &pset->pcp; -@@ -2604,7 +2617,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) - if (count) - isolate_pcp_pages(count, pcp, &dst); - -- local_irq_restore(flags); -+ cpu_unlock_irqrestore(cpu, flags); - - if (count) - free_pcppages_bulk(zone, &dst, false); -@@ -2642,6 +2655,7 @@ void drain_local_pages(struct zone *zone) - drain_pages(cpu); - } - -+#ifndef CONFIG_PREEMPT_RT_BASE - static void drain_local_pages_wq(struct work_struct *work) - { - /* -@@ -2655,6 +2669,7 @@ static void drain_local_pages_wq(struct work_struct *work) - drain_local_pages(NULL); - preempt_enable(); - } -+#endif - - /* - * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 
-@@ -2721,7 +2736,14 @@ void drain_all_pages(struct zone *zone) - else - cpumask_clear_cpu(cpu, &cpus_with_pcps); - } -- -+#ifdef CONFIG_PREEMPT_RT_BASE -+ for_each_cpu(cpu, &cpus_with_pcps) { -+ if (zone) -+ drain_pages_zone(cpu, zone); -+ else -+ drain_pages(cpu); -+ } -+#else - for_each_cpu(cpu, &cpus_with_pcps) { - struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); - INIT_WORK(work, drain_local_pages_wq); -@@ -2729,6 +2751,7 @@ void drain_all_pages(struct zone *zone) - } - for_each_cpu(cpu, &cpus_with_pcps) - flush_work(per_cpu_ptr(&pcpu_drain, cpu)); -+#endif - - mutex_unlock(&pcpu_drain_mutex); - } -@@ -2848,9 +2871,9 @@ void free_unref_page(struct page *page) - if (!free_unref_page_prepare(page, pfn)) - return; - -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - free_unref_page_commit(page, pfn, &dst); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - if (!list_empty(&dst)) - free_pcppages_bulk(zone, &dst, false); - } -@@ -2877,7 +2900,7 @@ void free_unref_page_list(struct list_head *list) - set_page_private(page, pfn); - } - -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - list_for_each_entry_safe(page, next, list, lru) { - unsigned long pfn = page_private(page); - enum zone_type type; -@@ -2892,12 +2915,12 @@ void free_unref_page_list(struct list_head *list) - * a large list of pages to free. - */ - if (++batch_count == SWAP_CLUSTER_MAX) { -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - batch_count = 0; -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - } - } -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - - for (i = 0; i < __MAX_NR_ZONES; ) { - struct page *page; -@@ -3046,7 +3069,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, - struct page *page; - unsigned long flags; - -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - pcp = &this_cpu_ptr(zone->pageset)->pcp; - list = &pcp->lists[migratetype]; - page = __rmqueue_pcplist(zone, migratetype, pcp, list); -@@ -3054,7 +3077,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone, - __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); - zone_statistics(preferred_zone, zone); - } -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - return page; - } - -@@ -3081,7 +3104,7 @@ struct page *rmqueue(struct zone *preferred_zone, - * allocate greater than order-1 page units with __GFP_NOFAIL. 
- */ - WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); -- spin_lock_irqsave(&zone->lock, flags); -+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags); - - do { - page = NULL; -@@ -3101,14 +3124,14 @@ struct page *rmqueue(struct zone *preferred_zone, - - __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); - zone_statistics(preferred_zone, zone); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - - out: - VM_BUG_ON_PAGE(page && bad_range(zone, page), page); - return page; - - failed: -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - return NULL; - } - -@@ -8129,7 +8152,7 @@ void zone_pcp_reset(struct zone *zone) - struct per_cpu_pageset *pset; - - /* avoid races with drain_pages() */ -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - if (zone->pageset != &boot_pageset) { - for_each_online_cpu(cpu) { - pset = per_cpu_ptr(zone->pageset, cpu); -@@ -8138,7 +8161,7 @@ void zone_pcp_reset(struct zone *zone) - free_percpu(zone->pageset); - zone->pageset = &boot_pageset; - } -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - } - - #ifdef CONFIG_MEMORY_HOTREMOVE --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0077-mm-swap-Convert-to-percpu-locked.patch b/kernel/patches-4.19.x-rt/0077-mm-swap-Convert-to-percpu-locked.patch deleted file mode 100644 index a3a58d20a..000000000 --- a/kernel/patches-4.19.x-rt/0077-mm-swap-Convert-to-percpu-locked.patch +++ /dev/null @@ -1,210 +0,0 @@ -From a03a4ee82ac46307acebdfe58e602aea9835a9a1 Mon Sep 17 00:00:00 2001 -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:51 -0500 -Subject: [PATCH 077/328] mm/swap: Convert to percpu locked - -Replace global locks (get_cpu + local_irq_save) with "local_locks()". -Currently there is one of for "rotate" and one for "swap". 
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - include/linux/swap.h | 2 ++ - mm/compaction.c | 6 ++++-- - mm/page_alloc.c | 3 ++- - mm/swap.c | 38 ++++++++++++++++++++++---------------- - 4 files changed, 30 insertions(+), 19 deletions(-) - -diff --git a/include/linux/swap.h b/include/linux/swap.h -index ee8f9f554a9e..2ad000e362bd 100644 ---- a/include/linux/swap.h -+++ b/include/linux/swap.h -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - #include - - struct notifier_block; -@@ -331,6 +332,7 @@ extern unsigned long nr_free_pagecache_pages(void); - - - /* linux/mm/swap.c */ -+DECLARE_LOCAL_IRQ_LOCK(swapvec_lock); - extern void lru_cache_add(struct page *); - extern void lru_cache_add_anon(struct page *page); - extern void lru_cache_add_file(struct page *page); -diff --git a/mm/compaction.c b/mm/compaction.c -index 5079ddbec8f9..c40d3a13cbbd 100644 ---- a/mm/compaction.c -+++ b/mm/compaction.c -@@ -1668,10 +1668,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro - block_start_pfn(cc->migrate_pfn, cc->order); - - if (cc->last_migrated_pfn < current_block_start) { -- cpu = get_cpu(); -+ cpu = get_cpu_light(); -+ local_lock_irq(swapvec_lock); - lru_add_drain_cpu(cpu); -+ local_unlock_irq(swapvec_lock); - drain_local_pages(zone); -- put_cpu(); -+ put_cpu_light(); - /* No more flushing until we migrate again */ - cc->last_migrated_pfn = 0; - } -diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index d6f9be9c6635..a1547f1be42c 100644 ---- a/mm/page_alloc.c -+++ b/mm/page_alloc.c -@@ -7242,8 +7242,9 @@ void __init free_area_init(unsigned long *zones_size) - - static int page_alloc_cpu_dead(unsigned int cpu) - { -- -+ local_lock_irq_on(swapvec_lock, cpu); - lru_add_drain_cpu(cpu); -+ local_unlock_irq_on(swapvec_lock, cpu); - drain_pages(cpu); - - /* -diff --git a/mm/swap.c b/mm/swap.c -index 45fdbfb6b2a6..92f994b962f0 100644 ---- a/mm/swap.c -+++ b/mm/swap.c -@@ -33,6 +33,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -51,6 +52,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs); - #ifdef CONFIG_SMP - static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); - #endif -+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); -+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); - - /* - * This path almost never happens for VM activity - pages are normally -@@ -253,11 +256,11 @@ void rotate_reclaimable_page(struct page *page) - unsigned long flags; - - get_page(page); -- local_irq_save(flags); -+ local_lock_irqsave(rotate_lock, flags); - pvec = this_cpu_ptr(&lru_rotate_pvecs); - if (!pagevec_add(pvec, page) || PageCompound(page)) - pagevec_move_tail(pvec); -- local_irq_restore(flags); -+ local_unlock_irqrestore(rotate_lock, flags); - } - } - -@@ -307,12 +310,13 @@ void activate_page(struct page *page) - { - page = compound_head(page); - if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { -- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, -+ activate_page_pvecs); - - get_page(page); - if (!pagevec_add(pvec, page) || PageCompound(page)) - pagevec_lru_move_fn(pvec, __activate_page, NULL); -- put_cpu_var(activate_page_pvecs); -+ put_locked_var(swapvec_lock, activate_page_pvecs); - } - } - -@@ -334,7 +338,7 @@ void activate_page(struct page *page) - - static void __lru_cache_activate_page(struct page *page) - { -- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); - 
int i; - - /* -@@ -356,7 +360,7 @@ static void __lru_cache_activate_page(struct page *page) - } - } - -- put_cpu_var(lru_add_pvec); -+ put_locked_var(swapvec_lock, lru_add_pvec); - } - - /* -@@ -398,12 +402,12 @@ EXPORT_SYMBOL(mark_page_accessed); - - static void __lru_cache_add(struct page *page) - { -- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); - - get_page(page); - if (!pagevec_add(pvec, page) || PageCompound(page)) - __pagevec_lru_add(pvec); -- put_cpu_var(lru_add_pvec); -+ put_locked_var(swapvec_lock, lru_add_pvec); - } - - /** -@@ -581,9 +585,9 @@ void lru_add_drain_cpu(int cpu) - unsigned long flags; - - /* No harm done if a racing interrupt already did this */ -- local_irq_save(flags); -+ local_lock_irqsave(rotate_lock, flags); - pagevec_move_tail(pvec); -- local_irq_restore(flags); -+ local_unlock_irqrestore(rotate_lock, flags); - } - - pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); -@@ -615,11 +619,12 @@ void deactivate_file_page(struct page *page) - return; - - if (likely(get_page_unless_zero(page))) { -- struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, -+ lru_deactivate_file_pvecs); - - if (!pagevec_add(pvec, page) || PageCompound(page)) - pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); -- put_cpu_var(lru_deactivate_file_pvecs); -+ put_locked_var(swapvec_lock, lru_deactivate_file_pvecs); - } - } - -@@ -634,19 +639,20 @@ void mark_page_lazyfree(struct page *page) - { - if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && - !PageSwapCache(page) && !PageUnevictable(page)) { -- struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, -+ lru_lazyfree_pvecs); - - get_page(page); - if (!pagevec_add(pvec, page) || PageCompound(page)) - pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL); -- put_cpu_var(lru_lazyfree_pvecs); -+ put_locked_var(swapvec_lock, lru_lazyfree_pvecs); - } - } - - void lru_add_drain(void) - { -- lru_add_drain_cpu(get_cpu()); -- put_cpu(); -+ lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); -+ local_unlock_cpu(swapvec_lock); - } - - #ifdef CONFIG_SMP --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0078-mm-perform-lru_add_drain_all-remotely.patch b/kernel/patches-4.19.x-rt/0078-mm-perform-lru_add_drain_all-remotely.patch deleted file mode 100644 index 8894d8756..000000000 --- a/kernel/patches-4.19.x-rt/0078-mm-perform-lru_add_drain_all-remotely.patch +++ /dev/null @@ -1,108 +0,0 @@ -From c48feb8fe1bad2aed0a15440a28da0bca8b5292a Mon Sep 17 00:00:00 2001 -From: Luiz Capitulino -Date: Fri, 27 May 2016 15:03:28 +0200 -Subject: [PATCH 078/328] mm: perform lru_add_drain_all() remotely - -lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run -on all CPUs that have non-empty LRU pagevecs and then waiting for -the scheduled work to complete. However, workqueue threads may never -have the chance to run on a CPU that's running a SCHED_FIFO task. -This causes lru_add_drain_all() to block forever. - -This commit solves this problem by changing lru_add_drain_all() -to drain the LRU pagevecs of remote CPUs. This is done by grabbing -swapvec_lock and calling lru_add_drain_cpu(). - -PS: This is based on an idea and initial implementation by - Rik van Riel. 
- -Signed-off-by: Rik van Riel -Signed-off-by: Luiz Capitulino -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/swap.c | 36 ++++++++++++++++++++++++++++++------ - 1 file changed, 30 insertions(+), 6 deletions(-) - -diff --git a/mm/swap.c b/mm/swap.c -index 92f994b962f0..3885645a45ce 100644 ---- a/mm/swap.c -+++ b/mm/swap.c -@@ -585,9 +585,15 @@ void lru_add_drain_cpu(int cpu) - unsigned long flags; - - /* No harm done if a racing interrupt already did this */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ local_lock_irqsave_on(rotate_lock, flags, cpu); -+ pagevec_move_tail(pvec); -+ local_unlock_irqrestore_on(rotate_lock, flags, cpu); -+#else - local_lock_irqsave(rotate_lock, flags); - pagevec_move_tail(pvec); - local_unlock_irqrestore(rotate_lock, flags); -+#endif - } - - pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); -@@ -657,6 +663,16 @@ void lru_add_drain(void) - - #ifdef CONFIG_SMP - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work) -+{ -+ local_lock_on(swapvec_lock, cpu); -+ lru_add_drain_cpu(cpu); -+ local_unlock_on(swapvec_lock, cpu); -+} -+ -+#else -+ - static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); - - static void lru_add_drain_per_cpu(struct work_struct *dummy) -@@ -664,6 +680,16 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy) - lru_add_drain(); - } - -+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work) -+{ -+ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); -+ -+ INIT_WORK(work, lru_add_drain_per_cpu); -+ queue_work_on(cpu, mm_percpu_wq, work); -+ cpumask_set_cpu(cpu, has_work); -+} -+#endif -+ - /* - * Doesn't need any cpu hotplug locking because we do rely on per-cpu - * kworkers being shut down before our page_alloc_cpu_dead callback is -@@ -688,21 +714,19 @@ void lru_add_drain_all(void) - cpumask_clear(&has_work); - - for_each_online_cpu(cpu) { -- struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); - - if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) || - pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) || - pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) || - pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) || -- need_activate_page_drain(cpu)) { -- INIT_WORK(work, lru_add_drain_per_cpu); -- queue_work_on(cpu, mm_percpu_wq, work); -- cpumask_set_cpu(cpu, &has_work); -- } -+ need_activate_page_drain(cpu)) -+ remote_lru_add_drain(cpu, &has_work); - } - -+#ifndef CONFIG_PREEMPT_RT_BASE - for_each_cpu(cpu, &has_work) - flush_work(&per_cpu(lru_add_drain_work, cpu)); -+#endif - - mutex_unlock(&lock); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch b/kernel/patches-4.19.x-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch deleted file mode 100644 index 5a2e64b68..000000000 --- a/kernel/patches-4.19.x-rt/0079-mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch +++ /dev/null @@ -1,144 +0,0 @@ -From 4e41266214b4e88cf9fb9d2c20b5bbc83dcfbdcc Mon Sep 17 00:00:00 2001 -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:30:13 -0500 -Subject: [PATCH 079/328] mm/vmstat: Protect per cpu variables with preempt - disable on RT - -Disable preemption on -RT for the vmstat code. On vanila the code runs in -IRQ-off regions while on -RT it is not. "preempt_disable" ensures that the -same ressources is not updated in parallel due to preemption. 
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - include/linux/vmstat.h | 4 ++++ - mm/vmstat.c | 12 ++++++++++++ - 2 files changed, 16 insertions(+) - -diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h -index f25cef84b41d..febee8649220 100644 ---- a/include/linux/vmstat.h -+++ b/include/linux/vmstat.h -@@ -54,7 +54,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); - */ - static inline void __count_vm_event(enum vm_event_item item) - { -+ preempt_disable_rt(); - raw_cpu_inc(vm_event_states.event[item]); -+ preempt_enable_rt(); - } - - static inline void count_vm_event(enum vm_event_item item) -@@ -64,7 +66,9 @@ static inline void count_vm_event(enum vm_event_item item) - - static inline void __count_vm_events(enum vm_event_item item, long delta) - { -+ preempt_disable_rt(); - raw_cpu_add(vm_event_states.event[item], delta); -+ preempt_enable_rt(); - } - - static inline void count_vm_events(enum vm_event_item item, long delta) -diff --git a/mm/vmstat.c b/mm/vmstat.c -index ce81b0a7d018..cfa2a3bbdf91 100644 ---- a/mm/vmstat.c -+++ b/mm/vmstat.c -@@ -320,6 +320,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, - long x; - long t; - -+ preempt_disable_rt(); - x = delta + __this_cpu_read(*p); - - t = __this_cpu_read(pcp->stat_threshold); -@@ -329,6 +330,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, - x = 0; - } - __this_cpu_write(*p, x); -+ preempt_enable_rt(); - } - EXPORT_SYMBOL(__mod_zone_page_state); - -@@ -340,6 +342,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, - long x; - long t; - -+ preempt_disable_rt(); - x = delta + __this_cpu_read(*p); - - t = __this_cpu_read(pcp->stat_threshold); -@@ -349,6 +352,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, - x = 0; - } - __this_cpu_write(*p, x); -+ preempt_enable_rt(); - } - EXPORT_SYMBOL(__mod_node_page_state); - -@@ -381,6 +385,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) - s8 __percpu *p = pcp->vm_stat_diff + item; - s8 v, t; - -+ preempt_disable_rt(); - v = __this_cpu_inc_return(*p); - t = __this_cpu_read(pcp->stat_threshold); - if (unlikely(v > t)) { -@@ -389,6 +394,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) - zone_page_state_add(v + overstep, zone, item); - __this_cpu_write(*p, -overstep); - } -+ preempt_enable_rt(); - } - - void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) -@@ -397,6 +403,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) - s8 __percpu *p = pcp->vm_node_stat_diff + item; - s8 v, t; - -+ preempt_disable_rt(); - v = __this_cpu_inc_return(*p); - t = __this_cpu_read(pcp->stat_threshold); - if (unlikely(v > t)) { -@@ -405,6 +412,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) - node_page_state_add(v + overstep, pgdat, item); - __this_cpu_write(*p, -overstep); - } -+ preempt_enable_rt(); - } - - void __inc_zone_page_state(struct page *page, enum zone_stat_item item) -@@ -425,6 +433,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) - s8 __percpu *p = pcp->vm_stat_diff + item; - s8 v, t; - -+ preempt_disable_rt(); - v = __this_cpu_dec_return(*p); - t = __this_cpu_read(pcp->stat_threshold); - if (unlikely(v < - t)) { -@@ -433,6 +442,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) - zone_page_state_add(v - overstep, zone, item); - __this_cpu_write(*p, overstep); - } -+ 
preempt_enable_rt(); - } - - void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) -@@ -441,6 +451,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) - s8 __percpu *p = pcp->vm_node_stat_diff + item; - s8 v, t; - -+ preempt_disable_rt(); - v = __this_cpu_dec_return(*p); - t = __this_cpu_read(pcp->stat_threshold); - if (unlikely(v < - t)) { -@@ -449,6 +460,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) - node_page_state_add(v - overstep, pgdat, item); - __this_cpu_write(*p, overstep); - } -+ preempt_enable_rt(); - } - - void __dec_zone_page_state(struct page *page, enum zone_stat_item item) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch b/kernel/patches-4.19.x-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch deleted file mode 100644 index df160893a..000000000 --- a/kernel/patches-4.19.x-rt/0080-ARM-Initialize-split-page-table-locks-for-vector-pag.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 5af4ea849237914c63d3fd50079e6975aa28f9b2 Mon Sep 17 00:00:00 2001 -From: Frank Rowand -Date: Sat, 1 Oct 2011 18:58:13 -0700 -Subject: [PATCH 080/328] ARM: Initialize split page table locks for vector - page - -Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if -PREEMPT_RT_FULL=y because vectors_user_mapping() creates a -VM_ALWAYSDUMP mapping of the vector page (address 0xffff0000), but no -ptl->lock has been allocated for the page. An attempt to coredump -that page will result in a kernel NULL pointer dereference when -follow_page() attempts to lock the page. - -The call tree to the NULL pointer dereference is: - - do_notify_resume() - get_signal_to_deliver() - do_coredump() - elf_core_dump() - get_dump_page() - __get_user_pages() - follow_page() - pte_offset_map_lock() <----- a #define - ... - rt_spin_lock() - -The underlying problem is exposed by mm-shrink-the-page-frame-to-rt-size.patch. - -Signed-off-by: Frank Rowand -Cc: Frank -Cc: Peter Zijlstra -Link: http://lkml.kernel.org/r/4E87C535.2030907@am.sony.com -Signed-off-by: Thomas Gleixner ---- - arch/arm/kernel/process.c | 24 ++++++++++++++++++++++++ - 1 file changed, 24 insertions(+) - -diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c -index 82ab015bf42b..8d3c7ce34c24 100644 ---- a/arch/arm/kernel/process.c -+++ b/arch/arm/kernel/process.c -@@ -324,6 +324,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) - } - - #ifdef CONFIG_MMU -+/* -+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not -+ * initialized by pgtable_page_ctor() then a coredump of the vector page will -+ * fail. 
-+ */ -+static int __init vectors_user_mapping_init_page(void) -+{ -+ struct page *page; -+ unsigned long addr = 0xffff0000; -+ pgd_t *pgd; -+ pud_t *pud; -+ pmd_t *pmd; -+ -+ pgd = pgd_offset_k(addr); -+ pud = pud_offset(pgd, addr); -+ pmd = pmd_offset(pud, addr); -+ page = pmd_page(*(pmd)); -+ -+ pgtable_page_ctor(page); -+ -+ return 0; -+} -+late_initcall(vectors_user_mapping_init_page); -+ - #ifdef CONFIG_KUSER_HELPERS - /* - * The vectors page is always readable from user space for the --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0081-mm-Enable-SLUB-for-RT.patch b/kernel/patches-4.19.x-rt/0081-mm-Enable-SLUB-for-RT.patch deleted file mode 100644 index 71ed40eb6..000000000 --- a/kernel/patches-4.19.x-rt/0081-mm-Enable-SLUB-for-RT.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 5bc9982e129b20ecb8f6c32d3d342af5087ffdae Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 25 Oct 2012 10:32:35 +0100 -Subject: [PATCH 081/328] mm: Enable SLUB for RT - -Avoid the memory allocation in IRQ section - -Signed-off-by: Thomas Gleixner -[bigeasy: factor out everything except the kcalloc() workaorund ] -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/slub.c | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/mm/slub.c b/mm/slub.c -index 224663e20772..cbe47408c6eb 100644 ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -3680,6 +3680,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, - const char *text) - { - #ifdef CONFIG_SLUB_DEBUG -+#ifdef CONFIG_PREEMPT_RT_BASE -+ /* XXX move out of irq-off section */ -+ slab_err(s, page, text, s->name); -+#else -+ - void *addr = page_address(page); - void *p; - unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects), -@@ -3701,6 +3706,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, - slab_unlock(page); - kfree(map); - #endif -+#endif - } - - /* --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch b/kernel/patches-4.19.x-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch deleted file mode 100644 index feb440ef6..000000000 --- a/kernel/patches-4.19.x-rt/0082-slub-Enable-irqs-for-__GFP_WAIT.patch +++ /dev/null @@ -1,47 +0,0 @@ -From c908b13d5d4d0e1154b41e93d9fb8349b7b7197a Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 9 Jan 2013 12:08:15 +0100 -Subject: [PATCH 082/328] slub: Enable irqs for __GFP_WAIT - -SYSTEM_RUNNING might be too late for enabling interrupts. Allocations -with GFP_WAIT can happen before that. So use this as an indicator. 
- -Signed-off-by: Thomas Gleixner ---- - mm/slub.c | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -diff --git a/mm/slub.c b/mm/slub.c -index cbe47408c6eb..81c32ceab228 100644 ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -1570,10 +1570,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) - void *start, *p; - int idx, order; - bool shuffle; -+ bool enableirqs = false; - - flags &= gfp_allowed_mask; - - if (gfpflags_allow_blocking(flags)) -+ enableirqs = true; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (system_state > SYSTEM_BOOTING) -+ enableirqs = true; -+#endif -+ if (enableirqs) - local_irq_enable(); - - flags |= s->allocflags; -@@ -1632,7 +1639,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) - page->frozen = 1; - - out: -- if (gfpflags_allow_blocking(flags)) -+ if (enableirqs) - local_irq_disable(); - if (!page) - return NULL; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch b/kernel/patches-4.19.x-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch deleted file mode 100644 index e909d74b8..000000000 --- a/kernel/patches-4.19.x-rt/0083-slub-Disable-SLUB_CPU_PARTIAL.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 41e0143022ef4180dafc14f033e72efa7ac652de Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 15 Apr 2015 19:00:47 +0200 -Subject: [PATCH 083/328] slub: Disable SLUB_CPU_PARTIAL - -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 -|in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7 -|1 lock held by rcuop/7/87: -| #0: (rcu_callback){......}, at: [] rcu_nocb_kthread+0x1ca/0x5d0 -|Preemption disabled at:[] put_cpu_partial+0x29/0x220 -| -|CPU: 0 PID: 87 Comm: rcuop/7 Tainted: G W 4.0.0-rt0+ #477 -|Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.7.5-20140531_083030-gandalf 04/01/2014 -| 000000000007a9fc ffff88013987baf8 ffffffff817441c7 0000000000000007 -| 0000000000000000 ffff88013987bb18 ffffffff810eee51 0000000000000000 -| ffff88013fc10200 ffff88013987bb48 ffffffff8174a1c4 000000000007a9fc -|Call Trace: -| [] dump_stack+0x4f/0x90 -| [] ___might_sleep+0x121/0x1b0 -| [] rt_spin_lock+0x24/0x60 -| [] __free_pages_ok+0xaa/0x540 -| [] __free_pages+0x1d/0x30 -| [] __free_slab+0xc5/0x1e0 -| [] free_delayed+0x56/0x70 -| [] put_cpu_partial+0x14d/0x220 -| [] __slab_free+0x158/0x2c0 -| [] kmem_cache_free+0x221/0x2d0 -| [] file_free_rcu+0x2c/0x40 -| [] rcu_nocb_kthread+0x243/0x5d0 -| [] kthread+0xfc/0x120 -| [] ret_from_fork+0x58/0x90 - -Signed-off-by: Sebastian Andrzej Siewior ---- - init/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/init/Kconfig b/init/Kconfig -index 61e8b531649b..b4e88fb19c26 100644 ---- a/init/Kconfig -+++ b/init/Kconfig -@@ -1701,7 +1701,7 @@ config SLAB_FREELIST_HARDENED - - config SLUB_CPU_PARTIAL - default y -- depends on SLUB && SMP -+ depends on SLUB && SMP && !PREEMPT_RT_FULL - bool "SLUB per cpu partial cache" - help - Per cpu partial caches accellerate objects allocation and freeing --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/kernel/patches-4.19.x-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch deleted file mode 100644 index 51d1f568f..000000000 --- a/kernel/patches-4.19.x-rt/0084-mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch +++ /dev/null @@ -1,74 +0,0 @@ -From 5d6ef143b9e65be0cda54dcea9150f3cfa951ffd Mon Sep 17 00:00:00 2001 -From: Yang Shi -Date: Wed, 
30 Oct 2013 11:48:33 -0700 -Subject: [PATCH 084/328] mm/memcontrol: Don't call schedule_work_on in - preemption disabled context - -The following trace is triggered when running ltp oom test cases: - -BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 -in_atomic(): 1, irqs_disabled(): 0, pid: 17188, name: oom03 -Preemption disabled at:[] mem_cgroup_reclaim+0x90/0xe0 - -CPU: 2 PID: 17188 Comm: oom03 Not tainted 3.10.10-rt3 #2 -Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010 -ffff88007684d730 ffff880070df9b58 ffffffff8169918d ffff880070df9b70 -ffffffff8106db31 ffff88007688b4a0 ffff880070df9b88 ffffffff8169d9c0 -ffff88007688b4a0 ffff880070df9bc8 ffffffff81059da1 0000000170df9bb0 -Call Trace: -[] dump_stack+0x19/0x1b -[] __might_sleep+0xf1/0x170 -[] rt_spin_lock+0x20/0x50 -[] queue_work_on+0x61/0x100 -[] drain_all_stock+0xe1/0x1c0 -[] mem_cgroup_reclaim+0x90/0xe0 -[] __mem_cgroup_try_charge+0x41a/0xc40 -[] ? release_pages+0x1b1/0x1f0 -[] ? sched_exec+0x40/0xb0 -[] mem_cgroup_charge_common+0x37/0x70 -[] mem_cgroup_newpage_charge+0x26/0x30 -[] handle_pte_fault+0x618/0x840 -[] ? unpin_current_cpu+0x16/0x70 -[] ? migrate_enable+0xd4/0x200 -[] handle_mm_fault+0x145/0x1e0 -[] __do_page_fault+0x1a1/0x4c0 -[] ? preempt_schedule_irq+0x4b/0x70 -[] ? retint_kernel+0x37/0x40 -[] do_page_fault+0xe/0x10 -[] page_fault+0x22/0x30 - -So, to prevent schedule_work_on from being called in preempt disabled context, -replace the pair of get/put_cpu() to get/put_cpu_light(). - - -Signed-off-by: Yang Shi -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/memcontrol.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index 3a3d109dce21..cf9e81fb342d 100644 ---- a/mm/memcontrol.c -+++ b/mm/memcontrol.c -@@ -2082,7 +2082,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) - * as well as workers from this path always operate on the local - * per-cpu data. CPU up doesn't touch memcg_stock at all. - */ -- curcpu = get_cpu(); -+ curcpu = get_cpu_light(); - for_each_online_cpu(cpu) { - struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); - struct mem_cgroup *memcg; -@@ -2102,7 +2102,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) - } - css_put(&memcg->css); - } -- put_cpu(); -+ put_cpu_light(); - mutex_unlock(&percpu_charge_mutex); - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch b/kernel/patches-4.19.x-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch deleted file mode 100644 index 278c8aa39..000000000 --- a/kernel/patches-4.19.x-rt/0085-mm-memcontrol-Replace-local_irq_disable-with-local-l.patch +++ /dev/null @@ -1,123 +0,0 @@ -From ab73b56574e07b881a37aa1a4b0040a331352d7c Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 28 Jan 2015 17:14:16 +0100 -Subject: [PATCH 085/328] mm/memcontrol: Replace local_irq_disable with local - locks - -There are a few local_irq_disable() which then take sleeping locks. This -patch converts them local locks. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/memcontrol.c | 24 ++++++++++++++++-------- - 1 file changed, 16 insertions(+), 8 deletions(-) - -diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index cf9e81fb342d..421ac74450f6 100644 ---- a/mm/memcontrol.c -+++ b/mm/memcontrol.c -@@ -69,6 +69,7 @@ - #include - #include - #include "slab.h" -+#include - - #include - -@@ -94,6 +95,8 @@ int do_swap_account __read_mostly; - #define do_swap_account 0 - #endif - -+static DEFINE_LOCAL_IRQ_LOCK(event_lock); -+ - /* Whether legacy memory+swap accounting is active */ - static bool do_memsw_account(void) - { -@@ -4922,12 +4925,12 @@ static int mem_cgroup_move_account(struct page *page, - - ret = 0; - -- local_irq_disable(); -+ local_lock_irq(event_lock); - mem_cgroup_charge_statistics(to, page, compound, nr_pages); - memcg_check_events(to, page); - mem_cgroup_charge_statistics(from, page, compound, -nr_pages); - memcg_check_events(from, page); -- local_irq_enable(); -+ local_unlock_irq(event_lock); - out_unlock: - unlock_page(page); - out: -@@ -6046,10 +6049,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, - - commit_charge(page, memcg, lrucare); - -- local_irq_disable(); -+ local_lock_irq(event_lock); - mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); - memcg_check_events(memcg, page); -- local_irq_enable(); -+ local_unlock_irq(event_lock); - - if (do_memsw_account() && PageSwapCache(page)) { - swp_entry_t entry = { .val = page_private(page) }; -@@ -6118,7 +6121,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) - memcg_oom_recover(ug->memcg); - } - -- local_irq_save(flags); -+ local_lock_irqsave(event_lock, flags); - __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); - __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); - __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); -@@ -6126,7 +6129,7 @@ static void uncharge_batch(const struct uncharge_gather *ug) - __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); - __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages); - memcg_check_events(ug->memcg, ug->dummy_page); -- local_irq_restore(flags); -+ local_unlock_irqrestore(event_lock, flags); - - if (!mem_cgroup_is_root(ug->memcg)) - css_put_many(&ug->memcg->css, nr_pages); -@@ -6289,10 +6292,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) - - commit_charge(newpage, memcg, false); - -- local_irq_save(flags); -+ local_lock_irqsave(event_lock, flags); - mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); - memcg_check_events(memcg, newpage); -- local_irq_restore(flags); -+ local_unlock_irqrestore(event_lock, flags); - } - - DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); -@@ -6484,6 +6487,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) - struct mem_cgroup *memcg, *swap_memcg; - unsigned int nr_entries; - unsigned short oldid; -+ unsigned long flags; - - VM_BUG_ON_PAGE(PageLRU(page), page); - VM_BUG_ON_PAGE(page_count(page), page); -@@ -6529,13 +6533,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) - * important here to have the interrupts disabled because it is the - * only synchronisation we have for updating the per-CPU variables. 
- */ -+ local_lock_irqsave(event_lock, flags); -+#ifndef CONFIG_PREEMPT_RT_BASE - VM_BUG_ON(!irqs_disabled()); -+#endif - mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page), - -nr_entries); - memcg_check_events(memcg, page); - - if (!mem_cgroup_is_root(memcg)) - css_put_many(&memcg->css, nr_entries); -+ local_unlock_irqrestore(event_lock, flags); - } - - /** --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch b/kernel/patches-4.19.x-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch deleted file mode 100644 index dc8f568c2..000000000 --- a/kernel/patches-4.19.x-rt/0086-mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch +++ /dev/null @@ -1,202 +0,0 @@ -From c8eeca08279f6363742db822856cc18ae1b7bdbd Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Tue, 22 Mar 2016 11:16:09 +0100 -Subject: [PATCH 086/328] mm/zsmalloc: copy with get_cpu_var() and locking - -get_cpu_var() disables preemption and triggers a might_sleep() splat later. -This is replaced with get_locked_var(). -This bitspinlocks are replaced with a proper mutex which requires a slightly -larger struct to allocate. - -Signed-off-by: Mike Galbraith -[bigeasy: replace the bitspin_lock() with a mutex, get_locked_var(). Mike then -fixed the size magic] -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/zsmalloc.c | 80 +++++++++++++++++++++++++++++++++++++++++++++++---- - 1 file changed, 74 insertions(+), 6 deletions(-) - -diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c -index 85cc29c93d93..63e83b47fa99 100644 ---- a/mm/zsmalloc.c -+++ b/mm/zsmalloc.c -@@ -56,6 +56,7 @@ - #include - #include - #include -+#include - - #define ZSPAGE_MAGIC 0x58 - -@@ -73,9 +74,22 @@ - */ - #define ZS_MAX_ZSPAGE_ORDER 2 - #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) -- - #define ZS_HANDLE_SIZE (sizeof(unsigned long)) - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+struct zsmalloc_handle { -+ unsigned long addr; -+ struct mutex lock; -+}; -+ -+#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle)) -+ -+#else -+ -+#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long)) -+#endif -+ - /* - * Object location (, ) is encoded as - * as single (unsigned long) handle value. 
-@@ -325,7 +339,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} - - static int create_cache(struct zs_pool *pool) - { -- pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, -+ pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE, - 0, 0, NULL); - if (!pool->handle_cachep) - return 1; -@@ -349,10 +363,27 @@ static void destroy_cache(struct zs_pool *pool) - - static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) - { -- return (unsigned long)kmem_cache_alloc(pool->handle_cachep, -- gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); -+ void *p; -+ -+ p = kmem_cache_alloc(pool->handle_cachep, -+ gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (p) { -+ struct zsmalloc_handle *zh = p; -+ -+ mutex_init(&zh->lock); -+ } -+#endif -+ return (unsigned long)p; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle) -+{ -+ return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1)); -+} -+#endif -+ - static void cache_free_handle(struct zs_pool *pool, unsigned long handle) - { - kmem_cache_free(pool->handle_cachep, (void *)handle); -@@ -371,12 +402,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) - - static void record_obj(unsigned long handle, unsigned long obj) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); -+ -+ WRITE_ONCE(zh->addr, obj); -+#else - /* - * lsb of @obj represents handle lock while other bits - * represent object value the handle is pointing so - * updating shouldn't do store tearing. - */ - WRITE_ONCE(*(unsigned long *)handle, obj); -+#endif - } - - /* zpool driver */ -@@ -458,6 +495,7 @@ MODULE_ALIAS("zpool-zsmalloc"); - - /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ - static DEFINE_PER_CPU(struct mapping_area, zs_map_area); -+static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock); - - static bool is_zspage_isolated(struct zspage *zspage) - { -@@ -887,7 +925,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) - - static unsigned long handle_to_obj(unsigned long handle) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); -+ -+ return zh->addr; -+#else - return *(unsigned long *)handle; -+#endif - } - - static unsigned long obj_to_head(struct page *page, void *obj) -@@ -901,22 +945,46 @@ static unsigned long obj_to_head(struct page *page, void *obj) - - static inline int testpin_tag(unsigned long handle) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); -+ -+ return mutex_is_locked(&zh->lock); -+#else - return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle); -+#endif - } - - static inline int trypin_tag(unsigned long handle) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); -+ -+ return mutex_trylock(&zh->lock); -+#else - return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle); -+#endif - } - - static void pin_tag(unsigned long handle) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); -+ -+ return mutex_lock(&zh->lock); -+#else - bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle); -+#endif - } - - static void unpin_tag(unsigned long handle) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); -+ -+ return mutex_unlock(&zh->lock); -+#else - 
bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle); -+#endif - } - - static void reset_page(struct page *page) -@@ -1342,7 +1410,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, - class = pool->size_class[class_idx]; - off = (class->size * obj_idx) & ~PAGE_MASK; - -- area = &get_cpu_var(zs_map_area); -+ area = &get_locked_var(zs_map_area_lock, zs_map_area); - area->vm_mm = mm; - if (off + class->size <= PAGE_SIZE) { - /* this object is contained entirely within a page */ -@@ -1396,7 +1464,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) - - __zs_unmap_object(area, pages, off, class->size); - } -- put_cpu_var(zs_map_area); -+ put_locked_var(zs_map_area_lock, zs_map_area); - - migrate_read_unlock(zspage); - unpin_tag(handle); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch b/kernel/patches-4.19.x-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch deleted file mode 100644 index b5589878b..000000000 --- a/kernel/patches-4.19.x-rt/0087-x86-mm-pat-disable-preemption-__split_large_page-aft.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 58952b3995a060f4fc7fbc02552ac489639d565e Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 11 Dec 2018 21:53:43 +0100 -Subject: [PATCH 087/328] x86/mm/pat: disable preemption __split_large_page() - after spin_lock() - -Commit "x86/mm/pat: Disable preemption around __flush_tlb_all()" added a -warning if __flush_tlb_all() is invoked in preemptible context. On !RT -the warning does not trigger because a spin lock is acquired which -disables preemption. On RT the spin lock does not disable preemption and -so the warning is seen. - -Disable preemption to avoid the warning __flush_tlb_all(). - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/mm/pageattr.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - -diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c -index 101f3ad0d6ad..0b0396261ca1 100644 ---- a/arch/x86/mm/pageattr.c -+++ b/arch/x86/mm/pageattr.c -@@ -687,12 +687,18 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, - pgprot_t ref_prot; - - spin_lock(&pgd_lock); -+ /* -+ * Keep preemption disabled after __flush_tlb_all() which expects not be -+ * preempted during the flush of the local TLB. -+ */ -+ preempt_disable(); - /* - * Check for races, another CPU might have split this page - * up for us already: - */ - tmp = _lookup_address_cpa(cpa, address, &level); - if (tmp != kpte) { -+ preempt_enable(); - spin_unlock(&pgd_lock); - return 1; - } -@@ -726,6 +732,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, - break; - - default: -+ preempt_enable(); - spin_unlock(&pgd_lock); - return 1; - } -@@ -764,6 +771,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, - * going on. 
- */ - __flush_tlb_all(); -+ preempt_enable(); - spin_unlock(&pgd_lock); - - return 0; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0088-radix-tree-use-local-locks.patch b/kernel/patches-4.19.x-rt/0088-radix-tree-use-local-locks.patch deleted file mode 100644 index 883509d16..000000000 --- a/kernel/patches-4.19.x-rt/0088-radix-tree-use-local-locks.patch +++ /dev/null @@ -1,175 +0,0 @@ -From 7f7e6402ea1895f3d2197122d4379c46a3a7fe14 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 25 Jan 2017 16:34:27 +0100 -Subject: [PATCH 088/328] radix-tree: use local locks - -The preload functionality uses per-CPU variables and preempt-disable to -ensure that it does not switch CPUs during its usage. This patch adds -local_locks() instead preempt_disable() for the same purpose and to -remain preemptible on -RT. - -Cc: stable-rt@vger.kernel.org -Reported-and-debugged-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/idr.h | 5 +---- - include/linux/radix-tree.h | 7 ++----- - lib/radix-tree.c | 32 +++++++++++++++++++++++--------- - 3 files changed, 26 insertions(+), 18 deletions(-) - -diff --git a/include/linux/idr.h b/include/linux/idr.h -index b6c6151c7446..81c9df5c04fa 100644 ---- a/include/linux/idr.h -+++ b/include/linux/idr.h -@@ -169,10 +169,7 @@ static inline bool idr_is_empty(const struct idr *idr) - * Each idr_preload() should be matched with an invocation of this - * function. See idr_preload() for details. - */ --static inline void idr_preload_end(void) --{ -- preempt_enable(); --} -+void idr_preload_end(void); - - /** - * idr_for_each_entry() - Iterate over an IDR's elements of a given type. -diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h -index 34149e8b5f73..affb0fc4c5b6 100644 ---- a/include/linux/radix-tree.h -+++ b/include/linux/radix-tree.h -@@ -330,6 +330,8 @@ unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, - int radix_tree_preload(gfp_t gfp_mask); - int radix_tree_maybe_preload(gfp_t gfp_mask); - int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); -+void radix_tree_preload_end(void); -+ - void radix_tree_init(void); - void *radix_tree_tag_set(struct radix_tree_root *, - unsigned long index, unsigned int tag); -@@ -349,11 +351,6 @@ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, - unsigned int max_items, unsigned int tag); - int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); - --static inline void radix_tree_preload_end(void) --{ -- preempt_enable(); --} -- - int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); - int radix_tree_split(struct radix_tree_root *, unsigned long index, - unsigned new_order); -diff --git a/lib/radix-tree.c b/lib/radix-tree.c -index e5cab5c4e383..9309e813bc1f 100644 ---- a/lib/radix-tree.c -+++ b/lib/radix-tree.c -@@ -38,7 +38,7 @@ - #include - #include - #include -- -+#include - - /* Number of nodes in fully populated tree of given height */ - static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; -@@ -87,6 +87,7 @@ struct radix_tree_preload { - struct radix_tree_node *nodes; - }; - static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; -+static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock); - - static inline struct radix_tree_node *entry_to_node(void *ptr) - { -@@ -405,12 +406,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, - * succeed in getting a node here (and never reach - * 
kmem_cache_alloc) - */ -- rtp = this_cpu_ptr(&radix_tree_preloads); -+ rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads); - if (rtp->nr) { - ret = rtp->nodes; - rtp->nodes = ret->parent; - rtp->nr--; - } -+ put_locked_var(radix_tree_preloads_lock, radix_tree_preloads); - /* - * Update the allocation stack trace as this is more useful - * for debugging. -@@ -476,14 +478,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) - */ - gfp_mask &= ~__GFP_ACCOUNT; - -- preempt_disable(); -+ local_lock(radix_tree_preloads_lock); - rtp = this_cpu_ptr(&radix_tree_preloads); - while (rtp->nr < nr) { -- preempt_enable(); -+ local_unlock(radix_tree_preloads_lock); - node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); - if (node == NULL) - goto out; -- preempt_disable(); -+ local_lock(radix_tree_preloads_lock); - rtp = this_cpu_ptr(&radix_tree_preloads); - if (rtp->nr < nr) { - node->parent = rtp->nodes; -@@ -525,7 +527,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask) - if (gfpflags_allow_blocking(gfp_mask)) - return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); - /* Preloading doesn't help anything with this gfp mask, skip it */ -- preempt_disable(); -+ local_lock(radix_tree_preloads_lock); - return 0; - } - EXPORT_SYMBOL(radix_tree_maybe_preload); -@@ -563,7 +565,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) - - /* Preloading doesn't help anything with this gfp mask, skip it */ - if (!gfpflags_allow_blocking(gfp_mask)) { -- preempt_disable(); -+ local_lock(radix_tree_preloads_lock); - return 0; - } - -@@ -597,6 +599,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) - return __radix_tree_preload(gfp_mask, nr_nodes); - } - -+void radix_tree_preload_end(void) -+{ -+ local_unlock(radix_tree_preloads_lock); -+} -+EXPORT_SYMBOL(radix_tree_preload_end); -+ - static unsigned radix_tree_load_root(const struct radix_tree_root *root, - struct radix_tree_node **nodep, unsigned long *maxindex) - { -@@ -2102,10 +2110,16 @@ EXPORT_SYMBOL(radix_tree_tagged); - void idr_preload(gfp_t gfp_mask) - { - if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE)) -- preempt_disable(); -+ local_lock(radix_tree_preloads_lock); - } - EXPORT_SYMBOL(idr_preload); - -+void idr_preload_end(void) -+{ -+ local_unlock(radix_tree_preloads_lock); -+} -+EXPORT_SYMBOL(idr_preload_end); -+ - int ida_pre_get(struct ida *ida, gfp_t gfp) - { - /* -@@ -2114,7 +2128,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) - * to return to the ida_pre_get() step. - */ - if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE)) -- preempt_enable(); -+ local_unlock(radix_tree_preloads_lock); - - if (!this_cpu_read(ida_bitmap)) { - struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0089-timers-Prepare-for-full-preemption.patch b/kernel/patches-4.19.x-rt/0089-timers-Prepare-for-full-preemption.patch deleted file mode 100644 index 1b9809ecd..000000000 --- a/kernel/patches-4.19.x-rt/0089-timers-Prepare-for-full-preemption.patch +++ /dev/null @@ -1,175 +0,0 @@ -From bb113ab4c9dea8e53db84af84d34864c4f3e9b2d Mon Sep 17 00:00:00 2001 -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:34 -0500 -Subject: [PATCH 089/328] timers: Prepare for full preemption - -When softirqs can be preempted we need to make sure that cancelling -the timer from the active thread can not deadlock vs. a running timer -callback. Add a waitqueue to resolve that. 
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - include/linux/timer.h | 2 +- - kernel/sched/core.c | 9 +++++++-- - kernel/time/timer.c | 45 +++++++++++++++++++++++++++++++++++++++---- - 3 files changed, 49 insertions(+), 7 deletions(-) - -diff --git a/include/linux/timer.h b/include/linux/timer.h -index 7b066fd38248..54627d046b3a 100644 ---- a/include/linux/timer.h -+++ b/include/linux/timer.h -@@ -172,7 +172,7 @@ extern void add_timer(struct timer_list *timer); - - extern int try_to_del_timer_sync(struct timer_list *timer); - --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) - extern int del_timer_sync(struct timer_list *timer); - #else - # define del_timer_sync(t) del_timer(t) -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index e6022cc2605b..986ed04425be 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -498,11 +498,14 @@ void resched_cpu(int cpu) - */ - int get_nohz_timer_target(void) - { -- int i, cpu = smp_processor_id(); -+ int i, cpu; - struct sched_domain *sd; - -+ preempt_disable_rt(); -+ cpu = smp_processor_id(); -+ - if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER)) -- return cpu; -+ goto preempt_en_rt; - - rcu_read_lock(); - for_each_domain(cpu, sd) { -@@ -521,6 +524,8 @@ int get_nohz_timer_target(void) - cpu = housekeeping_any_cpu(HK_FLAG_TIMER); - unlock: - rcu_read_unlock(); -+preempt_en_rt: -+ preempt_enable_rt(); - return cpu; - } - -diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index ae64cb819a9a..9019c9caf146 100644 ---- a/kernel/time/timer.c -+++ b/kernel/time/timer.c -@@ -44,6 +44,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -197,6 +198,9 @@ EXPORT_SYMBOL(jiffies_64); - struct timer_base { - raw_spinlock_t lock; - struct timer_list *running_timer; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct swait_queue_head wait_for_running_timer; -+#endif - unsigned long clk; - unsigned long next_expiry; - unsigned int cpu; -@@ -1178,6 +1182,33 @@ void add_timer_on(struct timer_list *timer, int cpu) - } - EXPORT_SYMBOL_GPL(add_timer_on); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * Wait for a running timer -+ */ -+static void wait_for_running_timer(struct timer_list *timer) -+{ -+ struct timer_base *base; -+ u32 tf = timer->flags; -+ -+ if (tf & TIMER_MIGRATING) -+ return; -+ -+ base = get_timer_base(tf); -+ swait_event_exclusive(base->wait_for_running_timer, -+ base->running_timer != timer); -+} -+ -+# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer) -+#else -+static inline void wait_for_running_timer(struct timer_list *timer) -+{ -+ cpu_relax(); -+} -+ -+# define wakeup_timer_waiters(b) do { } while (0) -+#endif -+ - /** - * del_timer - deactivate a timer. - * @timer: the timer to be deactivated -@@ -1233,7 +1264,7 @@ int try_to_del_timer_sync(struct timer_list *timer) - } - EXPORT_SYMBOL(try_to_del_timer_sync); - --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) - /** - * del_timer_sync - deactivate a timer and wait for the handler to finish. 
- * @timer: the timer to be deactivated -@@ -1293,7 +1324,7 @@ int del_timer_sync(struct timer_list *timer) - int ret = try_to_del_timer_sync(timer); - if (ret >= 0) - return ret; -- cpu_relax(); -+ wait_for_running_timer(timer); - } - } - EXPORT_SYMBOL(del_timer_sync); -@@ -1354,13 +1385,16 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head) - - fn = timer->function; - -- if (timer->flags & TIMER_IRQSAFE) { -+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && -+ timer->flags & TIMER_IRQSAFE) { - raw_spin_unlock(&base->lock); - call_timer_fn(timer, fn); -+ base->running_timer = NULL; - raw_spin_lock(&base->lock); - } else { - raw_spin_unlock_irq(&base->lock); - call_timer_fn(timer, fn); -+ base->running_timer = NULL; - raw_spin_lock_irq(&base->lock); - } - } -@@ -1683,8 +1717,8 @@ static inline void __run_timers(struct timer_base *base) - while (levels--) - expire_timers(base, heads + levels); - } -- base->running_timer = NULL; - raw_spin_unlock_irq(&base->lock); -+ wakeup_timer_waiters(base); - } - - /* -@@ -1929,6 +1963,9 @@ static void __init init_timer_cpu(int cpu) - base->cpu = cpu; - raw_spin_lock_init(&base->lock); - base->clk = jiffies; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ init_swait_queue_head(&base->wait_for_running_timer); -+#endif - } - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch b/kernel/patches-4.19.x-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch deleted file mode 100644 index 2d63f704a..000000000 --- a/kernel/patches-4.19.x-rt/0090-x86-kvm-Require-const-tsc-for-RT.patch +++ /dev/null @@ -1,36 +0,0 @@ -From c87615728aaaf5a59575f49682ed6339a9cb116f Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 6 Nov 2011 12:26:18 +0100 -Subject: [PATCH 090/328] x86: kvm Require const tsc for RT - -Non constant TSC is a nightmare on bare metal already, but with -virtualization it becomes a complete disaster because the workarounds -are horrible latency wise. That's also a preliminary for running RT in -a guest on top of a RT host. - -Signed-off-by: Thomas Gleixner ---- - arch/x86/kvm/x86.c | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index ade694f94a49..2dfb7c81743e 100644 ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -6873,6 +6873,13 @@ int kvm_arch_init(void *opaque) - goto out; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { -+ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); -+ return -EOPNOTSUPP; -+ } -+#endif -+ - r = kvm_mmu_module_init(); - if (r) - goto out_free_percpu; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch b/kernel/patches-4.19.x-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch deleted file mode 100644 index b46e06c28..000000000 --- a/kernel/patches-4.19.x-rt/0091-pci-switchtec-Don-t-use-completion-s-wait-queue.patch +++ /dev/null @@ -1,114 +0,0 @@ -From d46161e1a4fa5ff7b32deb64ac2e7698d0a56e49 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 4 Oct 2017 10:24:23 +0200 -Subject: [PATCH 091/328] pci/switchtec: Don't use completion's wait queue - -The poll callback is using completion's wait_queue_head_t member and -puts it in poll_wait() so the poll() caller gets a wakeup after command -completed. This does not work on RT because we don't have a -wait_queue_head_t in our completion implementation. Nobody in tree does -like that in tree so this is the only driver that breaks. 
- -Instead of using the completion here is waitqueue with a status flag as -suggested by Logan. - -I don't have the HW so I have no idea if it works as expected, so please -test it. - -Cc: Kurt Schwemmer -Cc: Logan Gunthorpe -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/pci/switch/switchtec.c | 22 +++++++++++++--------- - 1 file changed, 13 insertions(+), 9 deletions(-) - -diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c -index 43431816412c..a8df847bedee 100644 ---- a/drivers/pci/switch/switchtec.c -+++ b/drivers/pci/switch/switchtec.c -@@ -43,10 +43,11 @@ struct switchtec_user { - - enum mrpc_state state; - -- struct completion comp; -+ wait_queue_head_t cmd_comp; - struct kref kref; - struct list_head list; - -+ bool cmd_done; - u32 cmd; - u32 status; - u32 return_code; -@@ -68,7 +69,7 @@ static struct switchtec_user *stuser_create(struct switchtec_dev *stdev) - stuser->stdev = stdev; - kref_init(&stuser->kref); - INIT_LIST_HEAD(&stuser->list); -- init_completion(&stuser->comp); -+ init_waitqueue_head(&stuser->cmd_comp); - stuser->event_cnt = atomic_read(&stdev->event_cnt); - - dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); -@@ -147,7 +148,7 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser) - kref_get(&stuser->kref); - stuser->read_len = sizeof(stuser->data); - stuser_set_state(stuser, MRPC_QUEUED); -- init_completion(&stuser->comp); -+ stuser->cmd_done = false; - list_add_tail(&stuser->list, &stdev->mrpc_queue); - - mrpc_cmd_submit(stdev); -@@ -184,7 +185,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev) - stuser->read_len); - - out: -- complete_all(&stuser->comp); -+ stuser->cmd_done = true; -+ wake_up_interruptible(&stuser->cmd_comp); - list_del_init(&stuser->list); - stuser_put(stuser); - stdev->mrpc_busy = 0; -@@ -454,10 +456,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data, - mutex_unlock(&stdev->mrpc_mutex); - - if (filp->f_flags & O_NONBLOCK) { -- if (!try_wait_for_completion(&stuser->comp)) -+ if (!READ_ONCE(stuser->cmd_done)) - return -EAGAIN; - } else { -- rc = wait_for_completion_interruptible(&stuser->comp); -+ rc = wait_event_interruptible(stuser->cmd_comp, -+ stuser->cmd_done); - if (rc < 0) - return rc; - } -@@ -505,7 +508,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) - struct switchtec_dev *stdev = stuser->stdev; - __poll_t ret = 0; - -- poll_wait(filp, &stuser->comp.wait, wait); -+ poll_wait(filp, &stuser->cmd_comp, wait); - poll_wait(filp, &stdev->event_wq, wait); - - if (lock_mutex_and_test_alive(stdev)) -@@ -513,7 +516,7 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) - - mutex_unlock(&stdev->mrpc_mutex); - -- if (try_wait_for_completion(&stuser->comp)) -+ if (READ_ONCE(stuser->cmd_done)) - ret |= EPOLLIN | EPOLLRDNORM; - - if (stuser->event_cnt != atomic_read(&stdev->event_cnt)) -@@ -1037,7 +1040,8 @@ static void stdev_kill(struct switchtec_dev *stdev) - - /* Wake up and kill any users waiting on an MRPC request */ - list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) { -- complete_all(&stuser->comp); -+ stuser->cmd_done = true; -+ wake_up_interruptible(&stuser->cmd_comp); - list_del_init(&stuser->list); - stuser_put(stuser); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0092-wait.h-include-atomic.h.patch b/kernel/patches-4.19.x-rt/0092-wait.h-include-atomic.h.patch deleted file mode 100644 index 4e02896e0..000000000 --- a/kernel/patches-4.19.x-rt/0092-wait.h-include-atomic.h.patch +++ 
/dev/null @@ -1,41 +0,0 @@ -From 5048f6148f091b822260d482639172336a66cbc3 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 28 Oct 2013 12:19:57 +0100 -Subject: [PATCH 092/328] wait.h: include atomic.h -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -| CC init/main.o -|In file included from include/linux/mmzone.h:9:0, -| from include/linux/gfp.h:4, -| from include/linux/kmod.h:22, -| from include/linux/module.h:13, -| from init/main.c:15: -|include/linux/wait.h: In function ‘wait_on_atomic_t’: -|include/linux/wait.h:982:2: error: implicit declaration of function ‘atomic_read’ [-Werror=implicit-function-declaration] -| if (atomic_read(val) == 0) -| ^ - -This pops up on ARM. Non-RT gets its atomic.h include from spinlock.h - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/wait.h | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/include/linux/wait.h b/include/linux/wait.h -index ed7c122cb31f..2b5ef8e94d19 100644 ---- a/include/linux/wait.h -+++ b/include/linux/wait.h -@@ -10,6 +10,7 @@ - - #include - #include -+#include - - typedef struct wait_queue_entry wait_queue_entry_t; - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0093-work-simple-Simple-work-queue-implemenation.patch b/kernel/patches-4.19.x-rt/0093-work-simple-Simple-work-queue-implemenation.patch deleted file mode 100644 index 8dc5a28a1..000000000 --- a/kernel/patches-4.19.x-rt/0093-work-simple-Simple-work-queue-implemenation.patch +++ /dev/null @@ -1,245 +0,0 @@ -From 370c2439db620266b1bb104cc624841eec515e5c Mon Sep 17 00:00:00 2001 -From: Daniel Wagner -Date: Fri, 11 Jul 2014 15:26:11 +0200 -Subject: [PATCH 093/328] work-simple: Simple work queue implemenation - -Provides a framework for enqueuing callbacks from irq context -PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. - -Bases on wait-simple. 
- -Cc: Sebastian Andrzej Siewior -Signed-off-by: Daniel Wagner ---- - include/linux/swork.h | 24 ++++++ - kernel/sched/Makefile | 2 +- - kernel/sched/swork.c | 173 ++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 198 insertions(+), 1 deletion(-) - create mode 100644 include/linux/swork.h - create mode 100644 kernel/sched/swork.c - -diff --git a/include/linux/swork.h b/include/linux/swork.h -new file mode 100644 -index 000000000000..f175fa9a6016 ---- /dev/null -+++ b/include/linux/swork.h -@@ -0,0 +1,24 @@ -+#ifndef _LINUX_SWORK_H -+#define _LINUX_SWORK_H -+ -+#include -+ -+struct swork_event { -+ struct list_head item; -+ unsigned long flags; -+ void (*func)(struct swork_event *); -+}; -+ -+static inline void INIT_SWORK(struct swork_event *event, -+ void (*func)(struct swork_event *)) -+{ -+ event->flags = 0; -+ event->func = func; -+} -+ -+bool swork_queue(struct swork_event *sev); -+ -+int swork_get(void); -+void swork_put(void); -+ -+#endif /* _LINUX_SWORK_H */ -diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile -index 7fe183404c38..2b765aa4e2c4 100644 ---- a/kernel/sched/Makefile -+++ b/kernel/sched/Makefile -@@ -18,7 +18,7 @@ endif - - obj-y += core.o loadavg.o clock.o cputime.o - obj-y += idle.o fair.o rt.o deadline.o --obj-y += wait.o wait_bit.o swait.o completion.o -+obj-y += wait.o wait_bit.o swait.o swork.o completion.o - - obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o - obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o -diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c -new file mode 100644 -index 000000000000..a5b89fdacf19 ---- /dev/null -+++ b/kernel/sched/swork.c -@@ -0,0 +1,173 @@ -+/* -+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de -+ * -+ * Provides a framework for enqueuing callbacks from irq context -+ * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define SWORK_EVENT_PENDING (1 << 0) -+ -+static DEFINE_MUTEX(worker_mutex); -+static struct sworker *glob_worker; -+ -+struct sworker { -+ struct list_head events; -+ struct swait_queue_head wq; -+ -+ raw_spinlock_t lock; -+ -+ struct task_struct *task; -+ int refs; -+}; -+ -+static bool swork_readable(struct sworker *worker) -+{ -+ bool r; -+ -+ if (kthread_should_stop()) -+ return true; -+ -+ raw_spin_lock_irq(&worker->lock); -+ r = !list_empty(&worker->events); -+ raw_spin_unlock_irq(&worker->lock); -+ -+ return r; -+} -+ -+static int swork_kthread(void *arg) -+{ -+ struct sworker *worker = arg; -+ -+ for (;;) { -+ swait_event_interruptible_exclusive(worker->wq, -+ swork_readable(worker)); -+ if (kthread_should_stop()) -+ break; -+ -+ raw_spin_lock_irq(&worker->lock); -+ while (!list_empty(&worker->events)) { -+ struct swork_event *sev; -+ -+ sev = list_first_entry(&worker->events, -+ struct swork_event, item); -+ list_del(&sev->item); -+ raw_spin_unlock_irq(&worker->lock); -+ -+ WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING, -+ &sev->flags)); -+ sev->func(sev); -+ raw_spin_lock_irq(&worker->lock); -+ } -+ raw_spin_unlock_irq(&worker->lock); -+ } -+ return 0; -+} -+ -+static struct sworker *swork_create(void) -+{ -+ struct sworker *worker; -+ -+ worker = kzalloc(sizeof(*worker), GFP_KERNEL); -+ if (!worker) -+ return ERR_PTR(-ENOMEM); -+ -+ INIT_LIST_HEAD(&worker->events); -+ raw_spin_lock_init(&worker->lock); -+ init_swait_queue_head(&worker->wq); -+ -+ worker->task = kthread_run(swork_kthread, worker, "kswork"); -+ if (IS_ERR(worker->task)) { -+ kfree(worker); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ return worker; -+} -+ -+static void swork_destroy(struct sworker *worker) -+{ -+ kthread_stop(worker->task); -+ -+ WARN_ON(!list_empty(&worker->events)); -+ kfree(worker); -+} -+ -+/** -+ * swork_queue - queue swork -+ * -+ * Returns %false if @work was already on a queue, %true otherwise. -+ * -+ * The work is queued and processed on a random CPU -+ */ -+bool swork_queue(struct swork_event *sev) -+{ -+ unsigned long flags; -+ -+ if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags)) -+ return false; -+ -+ raw_spin_lock_irqsave(&glob_worker->lock, flags); -+ list_add_tail(&sev->item, &glob_worker->events); -+ raw_spin_unlock_irqrestore(&glob_worker->lock, flags); -+ -+ swake_up_one(&glob_worker->wq); -+ return true; -+} -+EXPORT_SYMBOL_GPL(swork_queue); -+ -+/** -+ * swork_get - get an instance of the sworker -+ * -+ * Returns an negative error code if the initialization if the worker did not -+ * work, %0 otherwise. -+ * -+ */ -+int swork_get(void) -+{ -+ struct sworker *worker; -+ -+ mutex_lock(&worker_mutex); -+ if (!glob_worker) { -+ worker = swork_create(); -+ if (IS_ERR(worker)) { -+ mutex_unlock(&worker_mutex); -+ return -ENOMEM; -+ } -+ -+ glob_worker = worker; -+ } -+ -+ glob_worker->refs++; -+ mutex_unlock(&worker_mutex); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(swork_get); -+ -+/** -+ * swork_put - puts an instance of the sworker -+ * -+ * Will destroy the sworker thread. This function must not be called until all -+ * queued events have been completed. 
-+ */ -+void swork_put(void) -+{ -+ mutex_lock(&worker_mutex); -+ -+ glob_worker->refs--; -+ if (glob_worker->refs > 0) -+ goto out; -+ -+ swork_destroy(glob_worker); -+ glob_worker = NULL; -+out: -+ mutex_unlock(&worker_mutex); -+} -+EXPORT_SYMBOL_GPL(swork_put); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch b/kernel/patches-4.19.x-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch deleted file mode 100644 index d3f38820c..000000000 --- a/kernel/patches-4.19.x-rt/0094-work-simple-drop-a-shit-statement-in-SWORK_EVENT_PEN.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 8c88098a7081d7cd354fb9e2a64598e6e10ce525 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 10 Sep 2018 18:00:31 +0200 -Subject: [PATCH 094/328] work-simple: drop a shit statement in - SWORK_EVENT_PENDING - -Dan Carpenter reported -| smatch warnings: -|kernel/sched/swork.c:63 swork_kthread() warn: test_bit() takes a bit number - -This is not a bug because we shift by zero (and use the same value in -both places). -Nevertheless I'm dropping that shift by zero to keep smatch quiet. - -Cc: Daniel Wagner -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/swork.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c -index a5b89fdacf19..c90d14b9b126 100644 ---- a/kernel/sched/swork.c -+++ b/kernel/sched/swork.c -@@ -12,7 +12,7 @@ - #include - #include - --#define SWORK_EVENT_PENDING (1 << 0) -+#define SWORK_EVENT_PENDING 1 - - static DEFINE_MUTEX(worker_mutex); - static struct sworker *glob_worker; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0095-completion-Use-simple-wait-queues.patch b/kernel/patches-4.19.x-rt/0095-completion-Use-simple-wait-queues.patch deleted file mode 100644 index 1a63fb2da..000000000 --- a/kernel/patches-4.19.x-rt/0095-completion-Use-simple-wait-queues.patch +++ /dev/null @@ -1,390 +0,0 @@ -From ae24940034c02ed671e3a5cc9c4cf31ebfc24fed Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 11 Jan 2013 11:23:51 +0100 -Subject: [PATCH 095/328] completion: Use simple wait queues - -Completions have no long lasting callbacks and therefor do not need -the complex waitqueue variant. Use simple waitqueues which reduces the -contention on the waitqueue lock. 
- -Signed-off-by: Thomas Gleixner ---- - arch/powerpc/platforms/ps3/device-init.c | 4 +-- - .../wireless/intersil/orinoco/orinoco_usb.c | 4 +-- - drivers/usb/gadget/function/f_fs.c | 2 +- - drivers/usb/gadget/legacy/inode.c | 4 +-- - include/linux/completion.h | 8 ++--- - include/linux/suspend.h | 6 ++++ - include/linux/swait.h | 2 ++ - kernel/power/hibernate.c | 7 ++++ - kernel/power/suspend.c | 4 +++ - kernel/sched/completion.c | 34 +++++++++---------- - kernel/sched/core.c | 10 ++++-- - kernel/sched/swait.c | 21 +++++++++++- - 12 files changed, 75 insertions(+), 31 deletions(-) - -diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c -index e7075aaff1bb..1580464a9d5b 100644 ---- a/arch/powerpc/platforms/ps3/device-init.c -+++ b/arch/powerpc/platforms/ps3/device-init.c -@@ -752,8 +752,8 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev, - } - pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); - -- res = wait_event_interruptible(dev->done.wait, -- dev->done.done || kthread_should_stop()); -+ res = swait_event_interruptible_exclusive(dev->done.wait, -+ dev->done.done || kthread_should_stop()); - if (kthread_should_stop()) - res = -EINTR; - if (res) { -diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c -index b704e4bce171..c364abaac548 100644 ---- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c -+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c -@@ -697,8 +697,8 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv, - while (!ctx->done.done && msecs--) - udelay(1000); - } else { -- wait_event_interruptible(ctx->done.wait, -- ctx->done.done); -+ swait_event_interruptible_exclusive(ctx->done.wait, -+ ctx->done.done); - } - break; - default: -diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c -index 2050993fb58b..e2ca75a6e241 100644 ---- a/drivers/usb/gadget/function/f_fs.c -+++ b/drivers/usb/gadget/function/f_fs.c -@@ -1626,7 +1626,7 @@ static void ffs_data_put(struct ffs_data *ffs) - pr_info("%s(): freeing\n", __func__); - ffs_data_clear(ffs); - BUG_ON(waitqueue_active(&ffs->ev.waitq) || -- waitqueue_active(&ffs->ep0req_completion.wait) || -+ swait_active(&ffs->ep0req_completion.wait) || - waitqueue_active(&ffs->wait)); - destroy_workqueue(ffs->io_completion_wq); - kfree(ffs->dev_name); -diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c -index 37ca0e669bd8..56a16587b221 100644 ---- a/drivers/usb/gadget/legacy/inode.c -+++ b/drivers/usb/gadget/legacy/inode.c -@@ -343,7 +343,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) - spin_unlock_irq (&epdata->dev->lock); - - if (likely (value == 0)) { -- value = wait_event_interruptible (done.wait, done.done); -+ value = swait_event_interruptible_exclusive(done.wait, done.done); - if (value != 0) { - spin_lock_irq (&epdata->dev->lock); - if (likely (epdata->ep != NULL)) { -@@ -352,7 +352,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) - usb_ep_dequeue (epdata->ep, epdata->req); - spin_unlock_irq (&epdata->dev->lock); - -- wait_event (done.wait, done.done); -+ swait_event_exclusive(done.wait, done.done); - if (epdata->status == -ECONNRESET) - epdata->status = -EINTR; - } else { -diff --git a/include/linux/completion.h b/include/linux/completion.h -index 519e94915d18..bf8e77001f18 100644 ---- a/include/linux/completion.h -+++ b/include/linux/completion.h -@@ -9,7 +9,7 @@ - * See 
kernel/sched/completion.c for details. - */ - --#include -+#include - - /* - * struct completion - structure used to maintain state for a "completion" -@@ -25,7 +25,7 @@ - */ - struct completion { - unsigned int done; -- wait_queue_head_t wait; -+ struct swait_queue_head wait; - }; - - #define init_completion_map(x, m) __init_completion(x) -@@ -34,7 +34,7 @@ static inline void complete_acquire(struct completion *x) {} - static inline void complete_release(struct completion *x) {} - - #define COMPLETION_INITIALIZER(work) \ -- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } -+ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } - - #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ - (*({ init_completion_map(&(work), &(map)); &(work); })) -@@ -85,7 +85,7 @@ static inline void complete_release(struct completion *x) {} - static inline void __init_completion(struct completion *x) - { - x->done = 0; -- init_waitqueue_head(&x->wait); -+ init_swait_queue_head(&x->wait); - } - - /** -diff --git a/include/linux/suspend.h b/include/linux/suspend.h -index 3f529ad9a9d2..328439ce71f5 100644 ---- a/include/linux/suspend.h -+++ b/include/linux/suspend.h -@@ -196,6 +196,12 @@ struct platform_s2idle_ops { - void (*end)(void); - }; - -+#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) -+extern bool pm_in_action; -+#else -+# define pm_in_action false -+#endif -+ - #ifdef CONFIG_SUSPEND - extern suspend_state_t mem_sleep_current; - extern suspend_state_t mem_sleep_default; -diff --git a/include/linux/swait.h b/include/linux/swait.h -index 73e06e9986d4..f426a0661aa0 100644 ---- a/include/linux/swait.h -+++ b/include/linux/swait.h -@@ -160,7 +160,9 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq) - extern void swake_up_one(struct swait_queue_head *q); - extern void swake_up_all(struct swait_queue_head *q); - extern void swake_up_locked(struct swait_queue_head *q); -+extern void swake_up_all_locked(struct swait_queue_head *q); - -+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); - extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); - extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); - -diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c -index f5ce9f7ec132..0f00ba01376f 100644 ---- a/kernel/power/hibernate.c -+++ b/kernel/power/hibernate.c -@@ -690,6 +690,10 @@ static int load_image_and_restore(void) - return error; - } - -+#ifndef CONFIG_SUSPEND -+bool pm_in_action; -+#endif -+ - /** - * hibernate - Carry out system hibernation, including saving the image. - */ -@@ -703,6 +707,8 @@ int hibernate(void) - return -EPERM; - } - -+ pm_in_action = true; -+ - lock_system_sleep(); - /* The snapshot device should not be opened while we're running */ - if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { -@@ -781,6 +787,7 @@ int hibernate(void) - atomic_inc(&snapshot_device_available); - Unlock: - unlock_system_sleep(); -+ pm_in_action = false; - pr_info("hibernation exit\n"); - - return error; -diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c -index 0bd595a0b610..a4456772d98e 100644 ---- a/kernel/power/suspend.c -+++ b/kernel/power/suspend.c -@@ -600,6 +600,8 @@ static int enter_state(suspend_state_t state) - return error; - } - -+bool pm_in_action; -+ - /** - * pm_suspend - Externally visible function for suspending the system. - * @state: System sleep state to enter. 
-@@ -614,6 +616,7 @@ int pm_suspend(suspend_state_t state) - if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) - return -EINVAL; - -+ pm_in_action = true; - pr_info("suspend entry (%s)\n", mem_sleep_labels[state]); - error = enter_state(state); - if (error) { -@@ -623,6 +626,7 @@ int pm_suspend(suspend_state_t state) - suspend_stats.success++; - } - pr_info("suspend exit\n"); -+ pm_in_action = false; - return error; - } - EXPORT_SYMBOL(pm_suspend); -diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c -index a1ad5b7d5521..755a58084978 100644 ---- a/kernel/sched/completion.c -+++ b/kernel/sched/completion.c -@@ -29,12 +29,12 @@ void complete(struct completion *x) - { - unsigned long flags; - -- spin_lock_irqsave(&x->wait.lock, flags); -+ raw_spin_lock_irqsave(&x->wait.lock, flags); - - if (x->done != UINT_MAX) - x->done++; -- __wake_up_locked(&x->wait, TASK_NORMAL, 1); -- spin_unlock_irqrestore(&x->wait.lock, flags); -+ swake_up_locked(&x->wait); -+ raw_spin_unlock_irqrestore(&x->wait.lock, flags); - } - EXPORT_SYMBOL(complete); - -@@ -58,10 +58,10 @@ void complete_all(struct completion *x) - { - unsigned long flags; - -- spin_lock_irqsave(&x->wait.lock, flags); -+ raw_spin_lock_irqsave(&x->wait.lock, flags); - x->done = UINT_MAX; -- __wake_up_locked(&x->wait, TASK_NORMAL, 0); -- spin_unlock_irqrestore(&x->wait.lock, flags); -+ swake_up_all_locked(&x->wait); -+ raw_spin_unlock_irqrestore(&x->wait.lock, flags); - } - EXPORT_SYMBOL(complete_all); - -@@ -70,20 +70,20 @@ do_wait_for_common(struct completion *x, - long (*action)(long), long timeout, int state) - { - if (!x->done) { -- DECLARE_WAITQUEUE(wait, current); -+ DECLARE_SWAITQUEUE(wait); - -- __add_wait_queue_entry_tail_exclusive(&x->wait, &wait); -+ __prepare_to_swait(&x->wait, &wait); - do { - if (signal_pending_state(state, current)) { - timeout = -ERESTARTSYS; - break; - } - __set_current_state(state); -- spin_unlock_irq(&x->wait.lock); -+ raw_spin_unlock_irq(&x->wait.lock); - timeout = action(timeout); -- spin_lock_irq(&x->wait.lock); -+ raw_spin_lock_irq(&x->wait.lock); - } while (!x->done && timeout); -- __remove_wait_queue(&x->wait, &wait); -+ __finish_swait(&x->wait, &wait); - if (!x->done) - return timeout; - } -@@ -100,9 +100,9 @@ __wait_for_common(struct completion *x, - - complete_acquire(x); - -- spin_lock_irq(&x->wait.lock); -+ raw_spin_lock_irq(&x->wait.lock); - timeout = do_wait_for_common(x, action, timeout, state); -- spin_unlock_irq(&x->wait.lock); -+ raw_spin_unlock_irq(&x->wait.lock); - - complete_release(x); - -@@ -291,12 +291,12 @@ bool try_wait_for_completion(struct completion *x) - if (!READ_ONCE(x->done)) - return false; - -- spin_lock_irqsave(&x->wait.lock, flags); -+ raw_spin_lock_irqsave(&x->wait.lock, flags); - if (!x->done) - ret = false; - else if (x->done != UINT_MAX) - x->done--; -- spin_unlock_irqrestore(&x->wait.lock, flags); -+ raw_spin_unlock_irqrestore(&x->wait.lock, flags); - return ret; - } - EXPORT_SYMBOL(try_wait_for_completion); -@@ -322,8 +322,8 @@ bool completion_done(struct completion *x) - * otherwise we can end up freeing the completion before complete() - * is done referencing it. 
- */ -- spin_lock_irqsave(&x->wait.lock, flags); -- spin_unlock_irqrestore(&x->wait.lock, flags); -+ raw_spin_lock_irqsave(&x->wait.lock, flags); -+ raw_spin_unlock_irqrestore(&x->wait.lock, flags); - return true; - } - EXPORT_SYMBOL(completion_done); -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 986ed04425be..584978640512 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7154,7 +7154,10 @@ void migrate_disable(void) - return; - } - #ifdef CONFIG_SCHED_DEBUG -- WARN_ON_ONCE(p->migrate_disable_atomic); -+ if (unlikely(p->migrate_disable_atomic)) { -+ tracing_off(); -+ WARN_ON_ONCE(1); -+ } - #endif - - if (p->migrate_disable) { -@@ -7184,7 +7187,10 @@ void migrate_enable(void) - } - - #ifdef CONFIG_SCHED_DEBUG -- WARN_ON_ONCE(p->migrate_disable_atomic); -+ if (unlikely(p->migrate_disable_atomic)) { -+ tracing_off(); -+ WARN_ON_ONCE(1); -+ } - #endif - - WARN_ON_ONCE(p->migrate_disable <= 0); -diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c -index 66b59ac77c22..c7cb30cdd1b7 100644 ---- a/kernel/sched/swait.c -+++ b/kernel/sched/swait.c -@@ -32,6 +32,25 @@ void swake_up_locked(struct swait_queue_head *q) - } - EXPORT_SYMBOL(swake_up_locked); - -+void swake_up_all_locked(struct swait_queue_head *q) -+{ -+ struct swait_queue *curr; -+ int wakes = 0; -+ -+ while (!list_empty(&q->task_list)) { -+ -+ curr = list_first_entry(&q->task_list, typeof(*curr), -+ task_list); -+ wake_up_process(curr->task); -+ list_del_init(&curr->task_list); -+ wakes++; -+ } -+ if (pm_in_action) -+ return; -+ WARN(wakes > 2, "complete_all() with %d waiters\n", wakes); -+} -+EXPORT_SYMBOL(swake_up_all_locked); -+ - void swake_up_one(struct swait_queue_head *q) - { - unsigned long flags; -@@ -69,7 +88,7 @@ void swake_up_all(struct swait_queue_head *q) - } - EXPORT_SYMBOL(swake_up_all); - --static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) -+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) - { - wait->task = current; - if (list_empty(&wait->task_list)) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0096-fs-aio-simple-simple-work.patch b/kernel/patches-4.19.x-rt/0096-fs-aio-simple-simple-work.patch deleted file mode 100644 index c54f8c1db..000000000 --- a/kernel/patches-4.19.x-rt/0096-fs-aio-simple-simple-work.patch +++ /dev/null @@ -1,88 +0,0 @@ -From 4ab27b1ec5f678a5dd444c6e1d3cdff6eeabfa12 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 16 Feb 2015 18:49:10 +0100 -Subject: [PATCH 096/328] fs/aio: simple simple work - -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768 -|in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2 -|2 locks held by rcuos/2/26: -| #0: (rcu_callback){.+.+..}, at: [] rcu_nocb_kthread+0x1e2/0x380 -| #1: (rcu_read_lock_sched){.+.+..}, at: [] percpu_ref_kill_rcu+0xa6/0x1c0 -|Preemption disabled at:[] rcu_nocb_kthread+0x263/0x380 -|Call Trace: -| [] dump_stack+0x4e/0x9c -| [] __might_sleep+0xfb/0x170 -| [] rt_spin_lock+0x24/0x70 -| [] free_ioctx_users+0x30/0x130 -| [] percpu_ref_kill_rcu+0x1b4/0x1c0 -| [] rcu_nocb_kthread+0x263/0x380 -| [] kthread+0xd6/0xf0 -| [] ret_from_fork+0x7c/0xb0 - -replace this preempt_disable() friendly swork. 
- -Reported-By: Mike Galbraith -Suggested-by: Benjamin LaHaise -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/aio.c | 15 +++++++++++++-- - 1 file changed, 13 insertions(+), 2 deletions(-) - -diff --git a/fs/aio.c b/fs/aio.c -index b5fbf2061868..93f8cf7fdeab 100644 ---- a/fs/aio.c -+++ b/fs/aio.c -@@ -42,6 +42,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -121,6 +122,7 @@ struct kioctx { - long nr_pages; - - struct rcu_work free_rwork; /* see free_ioctx() */ -+ struct swork_event free_swork; /* see free_ioctx() */ - - /* - * signals when all in-flight requests are done -@@ -265,6 +267,7 @@ static int __init aio_setup(void) - .mount = aio_mount, - .kill_sb = kill_anon_super, - }; -+ BUG_ON(swork_get()); - aio_mnt = kern_mount(&aio_fs); - if (IS_ERR(aio_mnt)) - panic("Failed to create aio fs mount."); -@@ -606,9 +609,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) - * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - - * now it's safe to cancel any that need to be. - */ --static void free_ioctx_users(struct percpu_ref *ref) -+static void free_ioctx_users_work(struct swork_event *sev) - { -- struct kioctx *ctx = container_of(ref, struct kioctx, users); -+ struct kioctx *ctx = container_of(sev, struct kioctx, free_swork); - struct aio_kiocb *req; - - spin_lock_irq(&ctx->ctx_lock); -@@ -626,6 +629,14 @@ static void free_ioctx_users(struct percpu_ref *ref) - percpu_ref_put(&ctx->reqs); - } - -+static void free_ioctx_users(struct percpu_ref *ref) -+{ -+ struct kioctx *ctx = container_of(ref, struct kioctx, users); -+ -+ INIT_SWORK(&ctx->free_swork, free_ioctx_users_work); -+ swork_queue(&ctx->free_swork); -+} -+ - static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) - { - unsigned i, new_nr; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch b/kernel/patches-4.19.x-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch deleted file mode 100644 index 4d7bb8075..000000000 --- a/kernel/patches-4.19.x-rt/0097-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch +++ /dev/null @@ -1,141 +0,0 @@ -From d349b691ea7fdefe94bb546a0533b63786c1857d Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 21 Aug 2013 17:48:46 +0200 -Subject: [PATCH 097/328] genirq: Do not invoke the affinity callback via a - workqueue on RT - -Joe Korty reported, that __irq_set_affinity_locked() schedules a -workqueue while holding a rawlock which results in a might_sleep() -warning. -This patch uses swork_queue() instead. - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/interrupt.h | 6 +++++ - kernel/irq/manage.c | 46 ++++++++++++++++++++++++++++++++++++--- - 2 files changed, 49 insertions(+), 3 deletions(-) - -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index 315f852b4981..a943c07b54ba 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -225,6 +226,7 @@ extern void resume_device_irqs(void); - * struct irq_affinity_notify - context for notification of IRQ affinity changes - * @irq: Interrupt to which notification applies - * @kref: Reference count, for internal use -+ * @swork: Swork item, for internal use - * @work: Work item, for internal use - * @notify: Function to be called on change. This will be - * called in process context. 
-@@ -236,7 +238,11 @@ extern void resume_device_irqs(void); - struct irq_affinity_notify { - unsigned int irq; - struct kref kref; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct swork_event swork; -+#else - struct work_struct work; -+#endif - void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); - void (*release)(struct kref *ref); - }; -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 3c26d0708709..eadcbfbd434a 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -285,7 +285,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, - - if (desc->affinity_notify) { - kref_get(&desc->affinity_notify->kref); -+ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ swork_queue(&desc->affinity_notify->swork); -+#else - schedule_work(&desc->affinity_notify->work); -+#endif - } - irqd_set(data, IRQD_AFFINITY_SET); - -@@ -323,10 +328,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) - } - EXPORT_SYMBOL_GPL(irq_set_affinity_hint); - --static void irq_affinity_notify(struct work_struct *work) -+static void _irq_affinity_notify(struct irq_affinity_notify *notify) - { -- struct irq_affinity_notify *notify = -- container_of(work, struct irq_affinity_notify, work); - struct irq_desc *desc = irq_to_desc(notify->irq); - cpumask_var_t cpumask; - unsigned long flags; -@@ -348,6 +351,35 @@ static void irq_affinity_notify(struct work_struct *work) - kref_put(¬ify->kref, notify->release); - } - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static void init_helper_thread(void) -+{ -+ static int init_sworker_once; -+ -+ if (init_sworker_once) -+ return; -+ if (WARN_ON(swork_get())) -+ return; -+ init_sworker_once = 1; -+} -+ -+static void irq_affinity_notify(struct swork_event *swork) -+{ -+ struct irq_affinity_notify *notify = -+ container_of(swork, struct irq_affinity_notify, swork); -+ _irq_affinity_notify(notify); -+} -+ -+#else -+ -+static void irq_affinity_notify(struct work_struct *work) -+{ -+ struct irq_affinity_notify *notify = -+ container_of(work, struct irq_affinity_notify, work); -+ _irq_affinity_notify(notify); -+} -+#endif -+ - /** - * irq_set_affinity_notifier - control notification of IRQ affinity changes - * @irq: Interrupt for which to enable/disable notification -@@ -376,7 +408,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) - if (notify) { - notify->irq = irq; - kref_init(¬ify->kref); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ INIT_SWORK(¬ify->swork, irq_affinity_notify); -+ init_helper_thread(); -+#else - INIT_WORK(¬ify->work, irq_affinity_notify); -+#endif - } - - raw_spin_lock_irqsave(&desc->lock, flags); -@@ -385,7 +422,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) - raw_spin_unlock_irqrestore(&desc->lock, flags); - - if (old_notify) { -+#ifndef CONFIG_PREEMPT_RT_BASE -+ /* Need to address this for PREEMPT_RT */ - cancel_work_sync(&old_notify->work); -+#endif - kref_put(&old_notify->kref, old_notify->release); - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch b/kernel/patches-4.19.x-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch deleted file mode 100644 index dce3d07e0..000000000 --- a/kernel/patches-4.19.x-rt/0098-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 059e9b393e1838e4ad06a521a8e11c21e7ea7919 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 15 Nov 2017 17:29:51 +0100 -Subject: [PATCH 098/328] 
time/hrtimer: avoid schedule_work() with interrupts - disabled - -The NOHZ code tries to schedule a workqueue with interrupts disabled. -Since this does not work -RT I am switching it to swork instead. - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/time/timer.c | 15 +++++++++++---- - 1 file changed, 11 insertions(+), 4 deletions(-) - -diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index 9019c9caf146..3fab1c50bf1b 100644 ---- a/kernel/time/timer.c -+++ b/kernel/time/timer.c -@@ -217,8 +217,7 @@ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); - static DEFINE_STATIC_KEY_FALSE(timers_nohz_active); - static DEFINE_MUTEX(timer_keys_mutex); - --static void timer_update_keys(struct work_struct *work); --static DECLARE_WORK(timer_update_work, timer_update_keys); -+static struct swork_event timer_update_swork; - - #ifdef CONFIG_SMP - unsigned int sysctl_timer_migration = 1; -@@ -236,7 +235,7 @@ static void timers_update_migration(void) - static inline void timers_update_migration(void) { } - #endif /* !CONFIG_SMP */ - --static void timer_update_keys(struct work_struct *work) -+static void timer_update_keys(struct swork_event *event) - { - mutex_lock(&timer_keys_mutex); - timers_update_migration(); -@@ -246,9 +245,17 @@ static void timer_update_keys(struct work_struct *work) - - void timers_update_nohz(void) - { -- schedule_work(&timer_update_work); -+ swork_queue(&timer_update_swork); - } - -+static __init int hrtimer_init_thread(void) -+{ -+ WARN_ON(swork_get()); -+ INIT_SWORK(&timer_update_swork, timer_update_keys); -+ return 0; -+} -+early_initcall(hrtimer_init_thread); -+ - int timer_migration_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/kernel/patches-4.19.x-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch deleted file mode 100644 index 3048a89a5..000000000 --- a/kernel/patches-4.19.x-rt/0099-hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch +++ /dev/null @@ -1,289 +0,0 @@ -From 10f79182845e34e88499d6ef178e5a7e79a1f0b5 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 3 Jul 2018 11:25:41 +0200 -Subject: [PATCH 099/328] hrtimer: consolidate hrtimer_init() + - hrtimer_init_sleeper() calls - -hrtimer_init_sleeper() calls require a prior initialisation of the -hrtimer object with hrtimer_init(). Lets make the initialisation of -the hrtimer object part of hrtimer_init_sleeper(). To remain -consistent consider init_on_stack as well. - -Beside adapting the hrtimer_init_sleeper[_on_stack]() functions, call -sites need to be updated as well. 
- -Link: http://lkml.kernel.org/r/20180703092541.2870-1-anna-maria@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior -[anna-maria: Updating the commit message, add staging/android/vsoc.c] -Signed-off-by: Anna-Maria Gleixner ---- - block/blk-mq.c | 3 +-- - drivers/staging/android/vsoc.c | 6 ++--- - include/linux/hrtimer.h | 19 +++++++++++--- - include/linux/wait.h | 4 +-- - kernel/futex.c | 19 ++++++-------- - kernel/time/hrtimer.c | 46 ++++++++++++++++++++++++++-------- - net/core/pktgen.c | 4 +-- - 7 files changed, 67 insertions(+), 34 deletions(-) - -diff --git a/block/blk-mq.c b/block/blk-mq.c -index 684acaa96db7..4aa3284874f6 100644 ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -3128,10 +3128,9 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, - kt = nsecs; - - mode = HRTIMER_MODE_REL; -- hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode); -+ hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode, current); - hrtimer_set_expires(&hs.timer, kt); - -- hrtimer_init_sleeper(&hs, current); - do { - if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) - break; -diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c -index 034d86869772..d089b2cb5dd7 100644 ---- a/drivers/staging/android/vsoc.c -+++ b/drivers/staging/android/vsoc.c -@@ -438,12 +438,10 @@ static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg) - return -EINVAL; - wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec); - -- hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC, -- HRTIMER_MODE_ABS); -+ hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC, -+ HRTIMER_MODE_ABS, current); - hrtimer_set_expires_range_ns(&to->timer, wake_time, - current->timer_slack_ns); -- -- hrtimer_init_sleeper(to, current); - } - - while (1) { -diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h -index 542b4fa2cda9..cbd041b22088 100644 ---- a/include/linux/hrtimer.h -+++ b/include/linux/hrtimer.h -@@ -364,10 +364,17 @@ DECLARE_PER_CPU(struct tick_device, tick_cpu_device); - /* Initialize timers: */ - extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, - enum hrtimer_mode mode); -+extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, -+ enum hrtimer_mode mode, -+ struct task_struct *task); - - #ifdef CONFIG_DEBUG_OBJECTS_TIMERS - extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, - enum hrtimer_mode mode); -+extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, -+ clockid_t clock_id, -+ enum hrtimer_mode mode, -+ struct task_struct *task); - - extern void destroy_hrtimer_on_stack(struct hrtimer *timer); - #else -@@ -377,6 +384,15 @@ static inline void hrtimer_init_on_stack(struct hrtimer *timer, - { - hrtimer_init(timer, which_clock, mode); - } -+ -+static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, -+ clockid_t clock_id, -+ enum hrtimer_mode mode, -+ struct task_struct *task) -+{ -+ hrtimer_init_sleeper(sl, clock_id, mode, task); -+} -+ - static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } - #endif - -@@ -486,9 +502,6 @@ extern long hrtimer_nanosleep(const struct timespec64 *rqtp, - const enum hrtimer_mode mode, - const clockid_t clockid); - --extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, -- struct task_struct *tsk); -- - extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, - const enum hrtimer_mode mode); - extern int schedule_hrtimeout_range_clock(ktime_t *expires, -diff --git a/include/linux/wait.h 
b/include/linux/wait.h -index 2b5ef8e94d19..94bd2e841de6 100644 ---- a/include/linux/wait.h -+++ b/include/linux/wait.h -@@ -489,8 +489,8 @@ do { \ - int __ret = 0; \ - struct hrtimer_sleeper __t; \ - \ -- hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \ -- hrtimer_init_sleeper(&__t, current); \ -+ hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, HRTIMER_MODE_REL, \ -+ current); \ - if ((timeout) != KTIME_MAX) \ - hrtimer_start_range_ns(&__t.timer, timeout, \ - current->timer_slack_ns, \ -diff --git a/kernel/futex.c b/kernel/futex.c -index 5c8053098fc8..23e1f8a478e8 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -2704,10 +2704,9 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, - if (abs_time) { - to = &timeout; - -- hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? -- CLOCK_REALTIME : CLOCK_MONOTONIC, -- HRTIMER_MODE_ABS); -- hrtimer_init_sleeper(to, current); -+ hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ? -+ CLOCK_REALTIME : CLOCK_MONOTONIC, -+ HRTIMER_MODE_ABS, current); - hrtimer_set_expires_range_ns(&to->timer, *abs_time, - current->timer_slack_ns); - } -@@ -2806,9 +2805,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - - if (time) { - to = &timeout; -- hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, -- HRTIMER_MODE_ABS); -- hrtimer_init_sleeper(to, current); -+ hrtimer_init_sleeper_on_stack(to, CLOCK_REALTIME, -+ HRTIMER_MODE_ABS, current); - hrtimer_set_expires(&to->timer, *time); - } - -@@ -3245,10 +3243,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - - if (abs_time) { - to = &timeout; -- hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? -- CLOCK_REALTIME : CLOCK_MONOTONIC, -- HRTIMER_MODE_ABS); -- hrtimer_init_sleeper(to, current); -+ hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ? 
-+ CLOCK_REALTIME : CLOCK_MONOTONIC, -+ HRTIMER_MODE_ABS, current); - hrtimer_set_expires_range_ns(&to->timer, *abs_time, - current->timer_slack_ns); - } -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 7362554416fd..c6f755495a63 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -1651,13 +1651,44 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) - return HRTIMER_NORESTART; - } - --void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) -+static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl, -+ clockid_t clock_id, -+ enum hrtimer_mode mode, -+ struct task_struct *task) - { -+ __hrtimer_init(&sl->timer, clock_id, mode); - sl->timer.function = hrtimer_wakeup; - sl->task = task; - } -+ -+/** -+ * hrtimer_init_sleeper - initialize sleeper to the given clock -+ * @sl: sleeper to be initialized -+ * @clock_id: the clock to be used -+ * @mode: timer mode abs/rel -+ * @task: the task to wake up -+ */ -+void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, -+ enum hrtimer_mode mode, struct task_struct *task) -+{ -+ debug_init(&sl->timer, clock_id, mode); -+ __hrtimer_init_sleeper(sl, clock_id, mode, task); -+ -+} - EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); - -+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS -+void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, -+ clockid_t clock_id, -+ enum hrtimer_mode mode, -+ struct task_struct *task) -+{ -+ debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr); -+ __hrtimer_init_sleeper(sl, clock_id, mode, task); -+} -+EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack); -+#endif -+ - int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) - { - switch(restart->nanosleep.type) { -@@ -1681,8 +1712,6 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod - { - struct restart_block *restart; - -- hrtimer_init_sleeper(t, current); -- - do { - set_current_state(TASK_INTERRUPTIBLE); - hrtimer_start_expires(&t->timer, mode); -@@ -1719,10 +1748,9 @@ static long __sched hrtimer_nanosleep_restart(struct restart_block *restart) - struct hrtimer_sleeper t; - int ret; - -- hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid, -- HRTIMER_MODE_ABS); -+ hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid, -+ HRTIMER_MODE_ABS, current); - hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); -- - ret = do_nanosleep(&t, HRTIMER_MODE_ABS); - destroy_hrtimer_on_stack(&t.timer); - return ret; -@@ -1740,7 +1768,7 @@ long hrtimer_nanosleep(const struct timespec64 *rqtp, - if (dl_task(current) || rt_task(current)) - slack = 0; - -- hrtimer_init_on_stack(&t.timer, clockid, mode); -+ hrtimer_init_sleeper_on_stack(&t, clockid, mode, current); - hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack); - ret = do_nanosleep(&t, mode); - if (ret != -ERESTART_RESTARTBLOCK) -@@ -1939,11 +1967,9 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, - return -EINTR; - } - -- hrtimer_init_on_stack(&t.timer, clock_id, mode); -+ hrtimer_init_sleeper_on_stack(&t, clock_id, mode, current); - hrtimer_set_expires_range_ns(&t.timer, *expires, delta); - -- hrtimer_init_sleeper(&t, current); -- - hrtimer_start_expires(&t.timer, mode); - - if (likely(t.task)) -diff --git a/net/core/pktgen.c b/net/core/pktgen.c -index 092fa3d75b32..9d472d626aaa 100644 ---- a/net/core/pktgen.c -+++ b/net/core/pktgen.c -@@ -2160,7 +2160,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) - s64 
remaining; - struct hrtimer_sleeper t; - -- hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); -+ hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS, -+ current); - hrtimer_set_expires(&t.timer, spin_until); - - remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); -@@ -2175,7 +2176,6 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) - } while (ktime_compare(end_time, spin_until) < 0); - } else { - /* see do_nanosleep */ -- hrtimer_init_sleeper(&t, current); - do { - set_current_state(TASK_INTERRUPTIBLE); - hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0100-hrtimers-Prepare-full-preemption.patch b/kernel/patches-4.19.x-rt/0100-hrtimers-Prepare-full-preemption.patch deleted file mode 100644 index 757d56dfe..000000000 --- a/kernel/patches-4.19.x-rt/0100-hrtimers-Prepare-full-preemption.patch +++ /dev/null @@ -1,289 +0,0 @@ -From 46ce0d2ba1077d583aa36bef4e8ffb712246bf2e Mon Sep 17 00:00:00 2001 -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:34 -0500 -Subject: [PATCH 100/328] hrtimers: Prepare full preemption - -Make cancellation of a running callback in softirq context safe -against preemption. - -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - fs/timerfd.c | 5 ++++- - include/linux/hrtimer.h | 13 +++++++++++- - include/linux/posix-timers.h | 2 +- - kernel/time/alarmtimer.c | 2 +- - kernel/time/hrtimer.c | 33 +++++++++++++++++++++++++++++- - kernel/time/itimer.c | 1 + - kernel/time/posix-timers.c | 39 ++++++++++++++++++++++++++++++++++-- - 7 files changed, 88 insertions(+), 7 deletions(-) - -diff --git a/fs/timerfd.c b/fs/timerfd.c -index d69ad801eb80..82d0f52414a6 100644 ---- a/fs/timerfd.c -+++ b/fs/timerfd.c -@@ -471,7 +471,10 @@ static int do_timerfd_settime(int ufd, int flags, - break; - } - spin_unlock_irq(&ctx->wqh.lock); -- cpu_relax(); -+ if (isalarm(ctx)) -+ hrtimer_wait_for_timer(&ctx->t.alarm.timer); -+ else -+ hrtimer_wait_for_timer(&ctx->t.tmr); - } - - /* -diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h -index cbd041b22088..8714f1a37d84 100644 ---- a/include/linux/hrtimer.h -+++ b/include/linux/hrtimer.h -@@ -22,6 +22,7 @@ - #include - #include - #include -+#include - - struct hrtimer_clock_base; - struct hrtimer_cpu_base; -@@ -216,6 +217,9 @@ struct hrtimer_cpu_base { - ktime_t expires_next; - struct hrtimer *next_timer; - ktime_t softirq_expires_next; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ wait_queue_head_t wait; -+#endif - struct hrtimer *softirq_next_timer; - struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; - } ____cacheline_aligned; -@@ -433,6 +437,13 @@ static inline void hrtimer_restart(struct hrtimer *timer) - hrtimer_start_expires(timer, HRTIMER_MODE_ABS); - } - -+/* Softirq preemption could deadlock timer removal */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer); -+#else -+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) -+#endif -+ - /* Query timers: */ - extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); - -@@ -464,7 +475,7 @@ static inline bool hrtimer_is_queued(struct hrtimer *timer) - * Helper function to check, whether the timer is running the callback - * function - */ --static inline int hrtimer_callback_running(struct hrtimer *timer) -+static inline int hrtimer_callback_running(const struct hrtimer *timer) - { - return timer->base->running == timer; - } -diff --git a/include/linux/posix-timers.h 
b/include/linux/posix-timers.h -index ee7e987ea1b4..0571b498db73 100644 ---- a/include/linux/posix-timers.h -+++ b/include/linux/posix-timers.h -@@ -114,8 +114,8 @@ struct k_itimer { - struct { - struct alarm alarmtimer; - } alarm; -- struct rcu_head rcu; - } it; -+ struct rcu_head rcu; - }; - - void run_posix_cpu_timers(struct task_struct *task); -diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c -index 9eece67f29f3..a465564367ec 100644 ---- a/kernel/time/alarmtimer.c -+++ b/kernel/time/alarmtimer.c -@@ -438,7 +438,7 @@ int alarm_cancel(struct alarm *alarm) - int ret = alarm_try_to_cancel(alarm); - if (ret >= 0) - return ret; -- cpu_relax(); -+ hrtimer_wait_for_timer(&alarm->timer); - } - } - EXPORT_SYMBOL_GPL(alarm_cancel); -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index c6f755495a63..e135cb27db39 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -939,6 +939,33 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) - } - EXPORT_SYMBOL_GPL(hrtimer_forward); - -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define wake_up_timer_waiters(b) wake_up(&(b)->wait) -+ -+/** -+ * hrtimer_wait_for_timer - Wait for a running timer -+ * -+ * @timer: timer to wait for -+ * -+ * The function waits in case the timers callback function is -+ * currently executed on the waitqueue of the timer base. The -+ * waitqueue is woken up after the timer callback function has -+ * finished execution. -+ */ -+void hrtimer_wait_for_timer(const struct hrtimer *timer) -+{ -+ struct hrtimer_clock_base *base = timer->base; -+ -+ if (base && base->cpu_base && -+ base->index >= HRTIMER_BASE_MONOTONIC_SOFT) -+ wait_event(base->cpu_base->wait, -+ !(hrtimer_callback_running(timer))); -+} -+ -+#else -+# define wake_up_timer_waiters(b) do { } while (0) -+#endif -+ - /* - * enqueue_hrtimer - internal function to (re)start a timer - * -@@ -1174,7 +1201,7 @@ int hrtimer_cancel(struct hrtimer *timer) - - if (ret >= 0) - return ret; -- cpu_relax(); -+ hrtimer_wait_for_timer(timer); - } - } - EXPORT_SYMBOL_GPL(hrtimer_cancel); -@@ -1480,6 +1507,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h) - hrtimer_update_softirq_timer(cpu_base, true); - - raw_spin_unlock_irqrestore(&cpu_base->lock, flags); -+ wake_up_timer_waiters(cpu_base); - } - - #ifdef CONFIG_HIGH_RES_TIMERS -@@ -1849,6 +1877,9 @@ int hrtimers_prepare_cpu(unsigned int cpu) - cpu_base->softirq_next_timer = NULL; - cpu_base->expires_next = KTIME_MAX; - cpu_base->softirq_expires_next = KTIME_MAX; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ init_waitqueue_head(&cpu_base->wait); -+#endif - return 0; - } - -diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c -index 9a65713c8309..55b0e58368bf 100644 ---- a/kernel/time/itimer.c -+++ b/kernel/time/itimer.c -@@ -215,6 +215,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) - /* We are sharing ->siglock with it_real_fn() */ - if (hrtimer_try_to_cancel(timer) < 0) { - spin_unlock_irq(&tsk->sighand->siglock); -+ hrtimer_wait_for_timer(&tsk->signal->real_timer); - goto again; - } - expires = timeval_to_ktime(value->it_value); -diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c -index 5a01c4fdbfef..a5ec421e3437 100644 ---- a/kernel/time/posix-timers.c -+++ b/kernel/time/posix-timers.c -@@ -463,7 +463,7 @@ static struct k_itimer * alloc_posix_timer(void) - - static void k_itimer_rcu_free(struct rcu_head *head) - { -- struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu); -+ struct k_itimer 
*tmr = container_of(head, struct k_itimer, rcu); - - kmem_cache_free(posix_timers_cache, tmr); - } -@@ -480,7 +480,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set) - } - put_pid(tmr->it_pid); - sigqueue_free(tmr->sigq); -- call_rcu(&tmr->it.rcu, k_itimer_rcu_free); -+ call_rcu(&tmr->rcu, k_itimer_rcu_free); - } - - static int common_timer_create(struct k_itimer *new_timer) -@@ -821,6 +821,22 @@ static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires, - hrtimer_start_expires(timer, HRTIMER_MODE_ABS); - } - -+/* -+ * Protected by RCU! -+ */ -+static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timr) -+{ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (kc->timer_arm == common_hrtimer_arm) -+ hrtimer_wait_for_timer(&timr->it.real.timer); -+ else if (kc == &alarm_clock) -+ hrtimer_wait_for_timer(&timr->it.alarm.alarmtimer.timer); -+ else -+ /* FIXME: Whacky hack for posix-cpu-timers */ -+ schedule_timeout(1); -+#endif -+} -+ - static int common_hrtimer_try_to_cancel(struct k_itimer *timr) - { - return hrtimer_try_to_cancel(&timr->it.real.timer); -@@ -885,6 +901,7 @@ static int do_timer_settime(timer_t timer_id, int flags, - if (!timr) - return -EINVAL; - -+ rcu_read_lock(); - kc = timr->kclock; - if (WARN_ON_ONCE(!kc || !kc->timer_set)) - error = -EINVAL; -@@ -893,9 +910,12 @@ static int do_timer_settime(timer_t timer_id, int flags, - - unlock_timer(timr, flag); - if (error == TIMER_RETRY) { -+ timer_wait_for_callback(kc, timr); - old_spec64 = NULL; // We already got the old time... -+ rcu_read_unlock(); - goto retry; - } -+ rcu_read_unlock(); - - return error; - } -@@ -977,10 +997,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id) - if (!timer) - return -EINVAL; - -+ rcu_read_lock(); - if (timer_delete_hook(timer) == TIMER_RETRY) { - unlock_timer(timer, flags); -+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), -+ timer); -+ rcu_read_unlock(); - goto retry_delete; - } -+ rcu_read_unlock(); - - spin_lock(¤t->sighand->siglock); - list_del(&timer->list); -@@ -1006,8 +1031,18 @@ static void itimer_delete(struct k_itimer *timer) - retry_delete: - spin_lock_irqsave(&timer->it_lock, flags); - -+ /* On RT we can race with a deletion */ -+ if (!timer->it_signal) { -+ unlock_timer(timer, flags); -+ return; -+ } -+ - if (timer_delete_hook(timer) == TIMER_RETRY) { -+ rcu_read_lock(); - unlock_timer(timer, flags); -+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), -+ timer); -+ rcu_read_unlock(); - goto retry_delete; - } - list_del(&timer->list); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch b/kernel/patches-4.19.x-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch deleted file mode 100644 index e4612188c..000000000 --- a/kernel/patches-4.19.x-rt/0101-hrtimer-by-timers-by-default-into-the-softirq-contex.patch +++ /dev/null @@ -1,236 +0,0 @@ -From 11c94409951908175bc4fab549d79ee48bcbc102 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 3 Jul 2009 08:44:31 -0500 -Subject: [PATCH 101/328] hrtimer: by timers by default into the softirq - context - -We can't have hrtimers callbacks running in hardirq context on RT. Therefore -the timers are deferred to the softirq context by default. -There are few timers which expect to be run in hardirq context even on RT. 
-Those are: -- very short running where low latency is critical (kvm lapic) -- timers which take raw locks and need run in hard-irq context (perf, sched) -- wake up related timer (kernel side of clock_nanosleep() and so on) - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/kvm/lapic.c | 2 +- - include/linux/hrtimer.h | 6 ++++++ - kernel/events/core.c | 4 ++-- - kernel/sched/core.c | 2 +- - kernel/sched/deadline.c | 2 +- - kernel/sched/fair.c | 4 ++-- - kernel/sched/rt.c | 4 ++-- - kernel/time/hrtimer.c | 21 +++++++++++++++++++-- - kernel/time/tick-broadcast-hrtimer.c | 2 +- - kernel/time/tick-sched.c | 2 +- - kernel/watchdog.c | 2 +- - 11 files changed, 37 insertions(+), 14 deletions(-) - -diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c -index 05905961ecca..9f08b74cda59 100644 ---- a/arch/x86/kvm/lapic.c -+++ b/arch/x86/kvm/lapic.c -@@ -2262,7 +2262,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) - apic->vcpu = vcpu; - - hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, -- HRTIMER_MODE_ABS_PINNED); -+ HRTIMER_MODE_ABS_PINNED_HARD); - apic->lapic_timer.timer.function = apic_timer_fn; - - /* -diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h -index 8714f1a37d84..082147c07831 100644 ---- a/include/linux/hrtimer.h -+++ b/include/linux/hrtimer.h -@@ -42,6 +42,7 @@ enum hrtimer_mode { - HRTIMER_MODE_REL = 0x01, - HRTIMER_MODE_PINNED = 0x02, - HRTIMER_MODE_SOFT = 0x04, -+ HRTIMER_MODE_HARD = 0x08, - - HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED, - HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED, -@@ -52,6 +53,11 @@ enum hrtimer_mode { - HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT, - HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT, - -+ HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD, -+ HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD, -+ -+ HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD, -+ HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD, - }; - - /* -diff --git a/kernel/events/core.c b/kernel/events/core.c -index 8c70ee23fbe9..9804b1a8b0fa 100644 ---- a/kernel/events/core.c -+++ b/kernel/events/core.c -@@ -1102,7 +1102,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) - cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); - - raw_spin_lock_init(&cpuctx->hrtimer_lock); -- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); -+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); - timer->function = perf_mux_hrtimer_handler; - } - -@@ -9269,7 +9269,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) - if (!is_sampling_event(event)) - return; - -- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); - hwc->hrtimer.function = perf_swevent_hrtimer; - - /* -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 584978640512..2f6b4365d070 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -315,7 +315,7 @@ static void hrtick_rq_init(struct rq *rq) - rq->hrtick_csd.info = rq; - #endif - -- hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); - rq->hrtick_timer.function = hrtick; - } - #else /* CONFIG_SCHED_HRTICK */ -diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index 4b13df38c069..974a8f9b615a 
100644 ---- a/kernel/sched/deadline.c -+++ b/kernel/sched/deadline.c -@@ -1086,7 +1086,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) - { - struct hrtimer *timer = &dl_se->dl_timer; - -- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); - timer->function = dl_task_timer; - } - -diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 27f9f9a785c1..d801e6c763ee 100644 ---- a/kernel/sched/fair.c -+++ b/kernel/sched/fair.c -@@ -4930,9 +4930,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) - cfs_b->period = ns_to_ktime(default_cfs_period()); - - INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); -- hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); -+ hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); - cfs_b->period_timer.function = sched_cfs_period_timer; -- hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); - cfs_b->slack_timer.function = sched_cfs_slack_timer; - cfs_b->distribute_running = 0; - } -diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c -index b6ca4a630050..aeb99395c03b 100644 ---- a/kernel/sched/rt.c -+++ b/kernel/sched/rt.c -@@ -45,8 +45,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) - - raw_spin_lock_init(&rt_b->rt_runtime_lock); - -- hrtimer_init(&rt_b->rt_period_timer, -- CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, -+ HRTIMER_MODE_REL_HARD); - rt_b->rt_period_timer.function = sched_rt_period_timer; - } - -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index e135cb27db39..1fd5ec39e7f4 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -1138,7 +1138,9 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, - * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft - * match. - */ -+#ifndef CONFIG_PREEMPT_RT_BASE - WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft); -+#endif - - base = lock_hrtimer_base(timer, &flags); - -@@ -1298,10 +1300,17 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id) - static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, - enum hrtimer_mode mode) - { -- bool softtimer = !!(mode & HRTIMER_MODE_SOFT); -- int base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0; -+ bool softtimer; -+ int base; - struct hrtimer_cpu_base *cpu_base; - -+ softtimer = !!(mode & HRTIMER_MODE_SOFT); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (!softtimer && !(mode & HRTIMER_MODE_HARD)) -+ softtimer = true; -+#endif -+ base = softtimer ? 
HRTIMER_MAX_CLOCK_BASES / 2 : 0; -+ - memset(timer, 0, sizeof(struct hrtimer)); - - cpu_base = raw_cpu_ptr(&hrtimer_bases); -@@ -1684,6 +1693,14 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl, - enum hrtimer_mode mode, - struct task_struct *task) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (!(mode & (HRTIMER_MODE_SOFT | HRTIMER_MODE_HARD))) { -+ if (task_is_realtime(current) || system_state != SYSTEM_RUNNING) -+ mode |= HRTIMER_MODE_HARD; -+ else -+ mode |= HRTIMER_MODE_SOFT; -+ } -+#endif - __hrtimer_init(&sl->timer, clock_id, mode); - sl->timer.function = hrtimer_wakeup; - sl->task = task; -diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c -index a836efd34589..c50e8f3262de 100644 ---- a/kernel/time/tick-broadcast-hrtimer.c -+++ b/kernel/time/tick-broadcast-hrtimer.c -@@ -107,7 +107,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t) - - void tick_setup_hrtimer_broadcast(void) - { -- hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); -+ hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); - bctimer.function = bc_handler; - clockevents_register_device(&ce_broadcast_hrtimer); - } -diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index e774a49176cc..012bc81879bf 100644 ---- a/kernel/time/tick-sched.c -+++ b/kernel/time/tick-sched.c -@@ -1314,7 +1314,7 @@ void tick_setup_sched_timer(void) - /* - * Emulate tick processing via per-CPU hrtimers: - */ -- hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); -+ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); - ts->sched_timer.function = tick_sched_timer; - - /* Get the next period (per-CPU) */ -diff --git a/kernel/watchdog.c b/kernel/watchdog.c -index 6d60701dc636..328620fe85f6 100644 ---- a/kernel/watchdog.c -+++ b/kernel/watchdog.c -@@ -485,7 +485,7 @@ static void watchdog_enable(unsigned int cpu) - * Start the timer first to prevent the NMI watchdog triggering - * before the timer has a chance to fire. - */ -- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); - hrtimer->function = watchdog_timer_fn; - hrtimer_start(hrtimer, ns_to_ktime(sample_period), - HRTIMER_MODE_REL_PINNED); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch b/kernel/patches-4.19.x-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch deleted file mode 100644 index c9aa6dea6..000000000 --- a/kernel/patches-4.19.x-rt/0102-sched-fair-Make-the-hrtimers-non-hard-again.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 80511ff61a43b8db5ac26d665ef32b73cbc03471 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 8 Jan 2019 12:31:06 +0100 -Subject: [PATCH 102/328] sched/fair: Make the hrtimers non-hard again - -Since commit "sched/fair: Robustify CFS-bandwidth timer locking" both -hrtimer can run in softirq context because now interrupts are disabled -as part of the locking procedure. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/fair.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index d801e6c763ee..27f9f9a785c1 100644 ---- a/kernel/sched/fair.c -+++ b/kernel/sched/fair.c -@@ -4930,9 +4930,9 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) - cfs_b->period = ns_to_ktime(default_cfs_period()); - - INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); -- hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); -+ hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); - cfs_b->period_timer.function = sched_cfs_period_timer; -- hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); -+ hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - cfs_b->slack_timer.function = sched_cfs_slack_timer; - cfs_b->distribute_running = 0; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch b/kernel/patches-4.19.x-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch deleted file mode 100644 index 308d5d0ae..000000000 --- a/kernel/patches-4.19.x-rt/0103-hrtimer-Move-schedule_work-call-to-helper-thread.patch +++ /dev/null @@ -1,97 +0,0 @@ -From 78baf86e1ae9ffb52fdbd93f63afc3fb3ecc3f26 Mon Sep 17 00:00:00 2001 -From: Yang Shi -Date: Mon, 16 Sep 2013 14:09:19 -0700 -Subject: [PATCH 103/328] hrtimer: Move schedule_work call to helper thread - -When run ltp leapsec_timer test, the following call trace is caught: - -BUG: sleeping function called from invalid context at kernel/rtmutex.c:659 -in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: swapper/1 -Preemption disabled at:[] cpu_startup_entry+0x133/0x310 - -CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.10.10-rt3 #2 -Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010 -ffffffff81c2f800 ffff880076843e40 ffffffff8169918d ffff880076843e58 -ffffffff8106db31 ffff88007684b4a0 ffff880076843e70 ffffffff8169d9c0 -ffff88007684b4a0 ffff880076843eb0 ffffffff81059da1 0000001876851200 -Call Trace: - [] dump_stack+0x19/0x1b -[] __might_sleep+0xf1/0x170 -[] rt_spin_lock+0x20/0x50 -[] queue_work_on+0x61/0x100 -[] clock_was_set_delayed+0x21/0x30 -[] do_timer+0x40e/0x660 -[] tick_do_update_jiffies64+0xf7/0x140 -[] tick_check_idle+0x92/0xc0 -[] irq_enter+0x57/0x70 -[] smp_apic_timer_interrupt+0x3e/0x9b -[] apic_timer_interrupt+0x6a/0x70 - [] ? cpuidle_enter_state+0x4c/0xc0 -[] cpuidle_idle_call+0xd8/0x2d0 -[] arch_cpu_idle+0xe/0x30 -[] cpu_startup_entry+0x19e/0x310 -[] start_secondary+0x1ad/0x1b0 - -The clock_was_set_delayed is called in hard IRQ handler (timer interrupt), which -calls schedule_work. - -Under PREEMPT_RT_FULL, schedule_work calls spinlocks which could sleep, so it's -not safe to call schedule_work in interrupt context. - -Reference upstream commit b68d61c705ef02384c0538b8d9374545097899ca -(rt,ntp: Move call to schedule_delayed_work() to helper thread) -from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git, which -makes a similar change. 
- -Signed-off-by: Yang Shi -[bigeasy: use swork_queue() instead a helper thread] -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/time/hrtimer.c | 24 ++++++++++++++++++++++++ - 1 file changed, 24 insertions(+) - -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 1fd5ec39e7f4..9f3412acdb16 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -730,6 +730,29 @@ static void hrtimer_switch_to_hres(void) - retrigger_next_event(NULL); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+static struct swork_event clock_set_delay_work; -+ -+static void run_clock_set_delay(struct swork_event *event) -+{ -+ clock_was_set(); -+} -+ -+void clock_was_set_delayed(void) -+{ -+ swork_queue(&clock_set_delay_work); -+} -+ -+static __init int create_clock_set_delay_thread(void) -+{ -+ WARN_ON(swork_get()); -+ INIT_SWORK(&clock_set_delay_work, run_clock_set_delay); -+ return 0; -+} -+early_initcall(create_clock_set_delay_thread); -+#else /* PREEMPT_RT_FULL */ -+ - static void clock_was_set_work(struct work_struct *work) - { - clock_was_set(); -@@ -745,6 +768,7 @@ void clock_was_set_delayed(void) - { - schedule_work(&hrtimer_work); - } -+#endif - - #else - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch b/kernel/patches-4.19.x-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch deleted file mode 100644 index 8ed3b2add..000000000 --- a/kernel/patches-4.19.x-rt/0104-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 4cb6829518eaf4e33821ad23eca2672efd3c8292 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 6 Dec 2018 10:15:13 +0100 -Subject: [PATCH 104/328] hrtimer: move state change before hrtimer_cancel in - do_nanosleep() - -There is a small window between setting t->task to NULL and waking the -task up (which would set TASK_RUNNING). So the timer would fire, run and -set ->task to NULL while the other side/do_nanosleep() wouldn't enter -freezable_schedule(). After all we are peemptible here (in -do_nanosleep() and on the timer wake up path) and on KVM/virt the -virt-CPU might get preempted. -So do_nanosleep() wouldn't enter freezable_schedule() but cancel the -timer which is still running and wait for it via -hrtimer_wait_for_timer(). Then wait_event()/might_sleep() would complain -that it is invoked with state != TASK_RUNNING. -This isn't a problem since it would be reset to TASK_RUNNING later -anyway and we don't rely on the previous state. - -Move the state update to TASK_RUNNING before hrtimer_cancel() so there -are no complains from might_sleep() about wrong state. 
- -Cc: stable-rt@vger.kernel.org -Reviewed-by: Daniel Bristot de Oliveira -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/time/hrtimer.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 9f3412acdb16..b800efb64238 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -1788,12 +1788,12 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod - if (likely(t->task)) - freezable_schedule(); - -+ __set_current_state(TASK_RUNNING); - hrtimer_cancel(&t->timer); - mode = HRTIMER_MODE_ABS; - - } while (t->task && !signal_pending(current)); - -- __set_current_state(TASK_RUNNING); - - if (!t->task) - return 0; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch b/kernel/patches-4.19.x-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch deleted file mode 100644 index dfd42f230..000000000 --- a/kernel/patches-4.19.x-rt/0105-posix-timers-Thread-posix-cpu-timers-on-rt.patch +++ /dev/null @@ -1,268 +0,0 @@ -From 502c5fa30dc1a4c295e2f42049d7c79b0245db74 Mon Sep 17 00:00:00 2001 -From: John Stultz -Date: Fri, 3 Jul 2009 08:29:58 -0500 -Subject: [PATCH 105/328] posix-timers: Thread posix-cpu-timers on -rt - -posix-cpu-timer code takes non -rt safe locks in hard irq -context. Move it to a thread. - -[ 3.0 fixes from Peter Zijlstra ] - -Signed-off-by: John Stultz -Signed-off-by: Thomas Gleixner ---- - include/linux/sched.h | 3 + - init/init_task.c | 7 ++ - kernel/fork.c | 3 + - kernel/time/posix-cpu-timers.c | 154 ++++++++++++++++++++++++++++++++- - 4 files changed, 164 insertions(+), 3 deletions(-) - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index e4af260f81c5..a90b6be626cd 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -832,6 +832,9 @@ struct task_struct { - #ifdef CONFIG_POSIX_TIMERS - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct task_struct *posix_timer_list; -+#endif - #endif - - /* Process credentials: */ -diff --git a/init/init_task.c b/init/init_task.c -index 0b49b9cf5571..9e3362748214 100644 ---- a/init/init_task.c -+++ b/init/init_task.c -@@ -50,6 +50,12 @@ static struct sighand_struct init_sighand = { - .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh), - }; - -+#if defined(CONFIG_POSIX_TIMERS) && defined(CONFIG_PREEMPT_RT_BASE) -+# define INIT_TIMER_LIST .posix_timer_list = NULL, -+#else -+# define INIT_TIMER_LIST -+#endif -+ - /* - * Set up the first task table, touch at your own risk!. 
Base=0, - * limit=0x1fffff (=2MB) -@@ -119,6 +125,7 @@ struct task_struct init_task - INIT_CPU_TIMERS(init_task) - .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock), - .timer_slack_ns = 50000, /* 50 usec default slack */ -+ INIT_TIMER_LIST - .thread_pid = &init_struct_pid, - .thread_group = LIST_HEAD_INIT(init_task.thread_group), - .thread_node = LIST_HEAD_INIT(init_signals.thread_head), -diff --git a/kernel/fork.c b/kernel/fork.c -index bc182d6fa2a9..ccfcd44a370f 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -1590,6 +1590,9 @@ static void rt_mutex_init_task(struct task_struct *p) - */ - static void posix_cpu_timers_init(struct task_struct *tsk) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ tsk->posix_timer_list = NULL; -+#endif - tsk->cputime_expires.prof_exp = 0; - tsk->cputime_expires.virt_exp = 0; - tsk->cputime_expires.sched_exp = 0; -diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c -index d62d7ae5201c..8d95e8de98b2 100644 ---- a/kernel/time/posix-cpu-timers.c -+++ b/kernel/time/posix-cpu-timers.c -@@ -3,8 +3,10 @@ - * Implement CPU time clocks for the POSIX clock interface. - */ - -+#include - #include - #include -+#include - #include - #include - #include -@@ -15,6 +17,7 @@ - #include - #include - #include -+#include - - #include "posix-timers.h" - -@@ -1140,14 +1143,12 @@ static inline int fastpath_timer_check(struct task_struct *tsk) - * already updated our counts. We need to check if any timers fire now. - * Interrupts are disabled. - */ --void run_posix_cpu_timers(struct task_struct *tsk) -+static void __run_posix_cpu_timers(struct task_struct *tsk) - { - LIST_HEAD(firing); - struct k_itimer *timer, *next; - unsigned long flags; - -- lockdep_assert_irqs_disabled(); -- - /* - * The fast path checks that there are no expired thread or thread - * group timers. If that's so, just return. 
-@@ -1200,6 +1201,153 @@ void run_posix_cpu_timers(struct task_struct *tsk) - } - } - -+#ifdef CONFIG_PREEMPT_RT_BASE -+#include -+#include -+DEFINE_PER_CPU(struct task_struct *, posix_timer_task); -+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); -+DEFINE_PER_CPU(bool, posix_timer_th_active); -+ -+static void posix_cpu_kthread_fn(unsigned int cpu) -+{ -+ struct task_struct *tsk = NULL; -+ struct task_struct *next = NULL; -+ -+ BUG_ON(per_cpu(posix_timer_task, cpu) != current); -+ -+ /* grab task list */ -+ raw_local_irq_disable(); -+ tsk = per_cpu(posix_timer_tasklist, cpu); -+ per_cpu(posix_timer_tasklist, cpu) = NULL; -+ raw_local_irq_enable(); -+ -+ /* its possible the list is empty, just return */ -+ if (!tsk) -+ return; -+ -+ /* Process task list */ -+ while (1) { -+ /* save next */ -+ next = tsk->posix_timer_list; -+ -+ /* run the task timers, clear its ptr and -+ * unreference it -+ */ -+ __run_posix_cpu_timers(tsk); -+ tsk->posix_timer_list = NULL; -+ put_task_struct(tsk); -+ -+ /* check if this is the last on the list */ -+ if (next == tsk) -+ break; -+ tsk = next; -+ } -+} -+ -+static inline int __fastpath_timer_check(struct task_struct *tsk) -+{ -+ /* tsk == current, ensure it is safe to use ->signal/sighand */ -+ if (unlikely(tsk->exit_state)) -+ return 0; -+ -+ if (!task_cputime_zero(&tsk->cputime_expires)) -+ return 1; -+ -+ if (!task_cputime_zero(&tsk->signal->cputime_expires)) -+ return 1; -+ -+ return 0; -+} -+ -+void run_posix_cpu_timers(struct task_struct *tsk) -+{ -+ unsigned int cpu = smp_processor_id(); -+ struct task_struct *tasklist; -+ -+ BUG_ON(!irqs_disabled()); -+ -+ if (per_cpu(posix_timer_th_active, cpu) != true) -+ return; -+ -+ /* get per-cpu references */ -+ tasklist = per_cpu(posix_timer_tasklist, cpu); -+ -+ /* check to see if we're already queued */ -+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) { -+ get_task_struct(tsk); -+ if (tasklist) { -+ tsk->posix_timer_list = tasklist; -+ } else { -+ /* -+ * The list is terminated by a self-pointing -+ * task_struct -+ */ -+ tsk->posix_timer_list = tsk; -+ } -+ per_cpu(posix_timer_tasklist, cpu) = tsk; -+ -+ wake_up_process(per_cpu(posix_timer_task, cpu)); -+ } -+} -+ -+static int posix_cpu_kthread_should_run(unsigned int cpu) -+{ -+ return __this_cpu_read(posix_timer_tasklist) != NULL; -+} -+ -+static void posix_cpu_kthread_park(unsigned int cpu) -+{ -+ this_cpu_write(posix_timer_th_active, false); -+} -+ -+static void posix_cpu_kthread_unpark(unsigned int cpu) -+{ -+ this_cpu_write(posix_timer_th_active, true); -+} -+ -+static void posix_cpu_kthread_setup(unsigned int cpu) -+{ -+ struct sched_param sp; -+ -+ sp.sched_priority = MAX_RT_PRIO - 1; -+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); -+ posix_cpu_kthread_unpark(cpu); -+} -+ -+static struct smp_hotplug_thread posix_cpu_thread = { -+ .store = &posix_timer_task, -+ .thread_should_run = posix_cpu_kthread_should_run, -+ .thread_fn = posix_cpu_kthread_fn, -+ .thread_comm = "posixcputmr/%u", -+ .setup = posix_cpu_kthread_setup, -+ .park = posix_cpu_kthread_park, -+ .unpark = posix_cpu_kthread_unpark, -+}; -+ -+static int __init posix_cpu_thread_init(void) -+{ -+ /* Start one for boot CPU. 
*/ -+ unsigned long cpu; -+ int ret; -+ -+ /* init the per-cpu posix_timer_tasklets */ -+ for_each_possible_cpu(cpu) -+ per_cpu(posix_timer_tasklist, cpu) = NULL; -+ -+ ret = smpboot_register_percpu_thread(&posix_cpu_thread); -+ WARN_ON(ret); -+ -+ return 0; -+} -+early_initcall(posix_cpu_thread_init); -+#else /* CONFIG_PREEMPT_RT_BASE */ -+void run_posix_cpu_timers(struct task_struct *tsk) -+{ -+ lockdep_assert_irqs_disabled(); -+ __run_posix_cpu_timers(tsk); -+} -+#endif /* CONFIG_PREEMPT_RT_BASE */ -+ - /* - * Set one of the process-wide special case CPU timers or RLIMIT_CPU. - * The tsk->sighand->siglock must be held by the caller. --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch b/kernel/patches-4.19.x-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch deleted file mode 100644 index 59d68b25d..000000000 --- a/kernel/patches-4.19.x-rt/0106-sched-Move-task_struct-cleanup-to-RCU.patch +++ /dev/null @@ -1,95 +0,0 @@ -From 11ee26b0db121f202fdb0adaea13a37ae4ee31f9 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 31 May 2011 16:59:16 +0200 -Subject: [PATCH 106/328] sched: Move task_struct cleanup to RCU - -__put_task_struct() does quite some expensive work. We don't want to -burden random tasks with that. - -Signed-off-by: Thomas Gleixner ---- - include/linux/sched.h | 3 +++ - include/linux/sched/task.h | 11 ++++++++++- - kernel/fork.c | 15 ++++++++++++++- - 3 files changed, 27 insertions(+), 2 deletions(-) - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index a90b6be626cd..0b8850b6093b 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1194,6 +1194,9 @@ struct task_struct { - unsigned int sequential_io; - unsigned int sequential_io_avg; - #endif -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct rcu_head put_rcu; -+#endif - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; - #endif -diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h -index 44c6f15800ff..d2b33e57c636 100644 ---- a/include/linux/sched/task.h -+++ b/include/linux/sched/task.h -@@ -90,6 +90,15 @@ extern void sched_exec(void); - - #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) - -+#ifdef CONFIG_PREEMPT_RT_BASE -+extern void __put_task_struct_cb(struct rcu_head *rhp); -+ -+static inline void put_task_struct(struct task_struct *t) -+{ -+ if (atomic_dec_and_test(&t->usage)) -+ call_rcu(&t->put_rcu, __put_task_struct_cb); -+} -+#else - extern void __put_task_struct(struct task_struct *t); - - static inline void put_task_struct(struct task_struct *t) -@@ -97,7 +106,7 @@ static inline void put_task_struct(struct task_struct *t) - if (atomic_dec_and_test(&t->usage)) - __put_task_struct(t); - } -- -+#endif - struct task_struct *task_rcu_dereference(struct task_struct **ptask); - - #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT -diff --git a/kernel/fork.c b/kernel/fork.c -index ccfcd44a370f..309f4a20d4ac 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -676,7 +676,9 @@ static inline void put_signal_struct(struct signal_struct *sig) - if (atomic_dec_and_test(&sig->sigcnt)) - free_signal_struct(sig); - } -- -+#ifdef CONFIG_PREEMPT_RT_BASE -+static -+#endif - void __put_task_struct(struct task_struct *tsk) - { - WARN_ON(!tsk->exit_state); -@@ -693,7 +695,18 @@ void __put_task_struct(struct task_struct *tsk) - if (!profile_handoff_task(tsk)) - free_task(tsk); - } -+#ifndef CONFIG_PREEMPT_RT_BASE - EXPORT_SYMBOL_GPL(__put_task_struct); -+#else -+void __put_task_struct_cb(struct rcu_head *rhp) 
-+{ -+ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); -+ -+ __put_task_struct(tsk); -+ -+} -+EXPORT_SYMBOL_GPL(__put_task_struct_cb); -+#endif - - void __init __weak arch_task_cache_init(void) { } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch b/kernel/patches-4.19.x-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch deleted file mode 100644 index 82e563c9a..000000000 --- a/kernel/patches-4.19.x-rt/0107-sched-Limit-the-number-of-task-migrations-per-batch.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 1ecb69a3b71549668399060338d4e216c3886d67 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Mon, 6 Jun 2011 12:12:51 +0200 -Subject: [PATCH 107/328] sched: Limit the number of task migrations per batch - -Put an upper limit on the number of tasks which are migrated per batch -to avoid large latencies. - -Signed-off-by: Thomas Gleixner ---- - kernel/sched/core.c | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 2f6b4365d070..5c23d1272429 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -44,7 +44,11 @@ const_debug unsigned int sysctl_sched_features = - * Number of tasks to iterate in a single balance run. - * Limited because this is done with IRQs disabled. - */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+const_debug unsigned int sysctl_sched_nr_migrate = 8; -+#else - const_debug unsigned int sysctl_sched_nr_migrate = 32; -+#endif - - /* - * period over which we measure -rt task CPU usage in us. --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch b/kernel/patches-4.19.x-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch deleted file mode 100644 index a494f4290..000000000 --- a/kernel/patches-4.19.x-rt/0108-sched-Move-mmdrop-to-RCU-on-RT.patch +++ /dev/null @@ -1,139 +0,0 @@ -From 81ab4eb4f9ca9097217f44a6961e60ebb82725b1 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Mon, 6 Jun 2011 12:20:33 +0200 -Subject: [PATCH 108/328] sched: Move mmdrop to RCU on RT - -Takes sleeping locks and calls into the memory allocator, so nothing -we want to do in task switch and oder atomic contexts. 
- -Signed-off-by: Thomas Gleixner ---- - include/linux/mm_types.h | 4 ++++ - include/linux/sched/mm.h | 11 +++++++++++ - kernel/fork.c | 13 +++++++++++++ - kernel/sched/core.c | 18 ++++++++++++++++-- - 4 files changed, 44 insertions(+), 2 deletions(-) - -diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index 3a9a996af229..202b736ccbfa 100644 ---- a/include/linux/mm_types.h -+++ b/include/linux/mm_types.h -@@ -12,6 +12,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -487,6 +488,9 @@ struct mm_struct { - bool tlb_flush_batched; - #endif - struct uprobes_state uprobes_state; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct rcu_head delayed_drop; -+#endif - #ifdef CONFIG_HUGETLB_PAGE - atomic_long_t hugetlb_usage; - #endif -diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h -index e9d4e389aed9..fb59f96fdd2e 100644 ---- a/include/linux/sched/mm.h -+++ b/include/linux/sched/mm.h -@@ -49,6 +49,17 @@ static inline void mmdrop(struct mm_struct *mm) - __mmdrop(mm); - } - -+#ifdef CONFIG_PREEMPT_RT_BASE -+extern void __mmdrop_delayed(struct rcu_head *rhp); -+static inline void mmdrop_delayed(struct mm_struct *mm) -+{ -+ if (atomic_dec_and_test(&mm->mm_count)) -+ call_rcu(&mm->delayed_drop, __mmdrop_delayed); -+} -+#else -+# define mmdrop_delayed(mm) mmdrop(mm) -+#endif -+ - /* - * This has to be called after a get_task_mm()/mmget_not_zero() - * followed by taking the mmap_sem for writing before modifying the -diff --git a/kernel/fork.c b/kernel/fork.c -index 309f4a20d4ac..d4ec53c72577 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -642,6 +642,19 @@ void __mmdrop(struct mm_struct *mm) - } - EXPORT_SYMBOL_GPL(__mmdrop); - -+#ifdef CONFIG_PREEMPT_RT_BASE -+/* -+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't -+ * want another facility to make this work. -+ */ -+void __mmdrop_delayed(struct rcu_head *rhp) -+{ -+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); -+ -+ __mmdrop(mm); -+} -+#endif -+ - static void mmdrop_async_fn(struct work_struct *work) - { - struct mm_struct *mm; -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 5c23d1272429..cb89c90513dd 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -2729,9 +2729,13 @@ static struct rq *finish_task_switch(struct task_struct *prev) - * provided by mmdrop(), - * - a sync_core for SYNC_CORE. - */ -+ /* -+ * We use mmdrop_delayed() here so we don't have to do the -+ * full __mmdrop() when we are the last user. -+ */ - if (mm) { - membarrier_mm_sync_core_before_usermode(mm); -- mmdrop(mm); -+ mmdrop_delayed(mm); - } - if (unlikely(prev_state == TASK_DEAD)) { - if (prev->sched_class->task_dead) -@@ -5601,6 +5605,8 @@ void sched_setnuma(struct task_struct *p, int nid) - #endif /* CONFIG_NUMA_BALANCING */ - - #ifdef CONFIG_HOTPLUG_CPU -+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); -+ - /* - * Ensure that the idle task is using init_mm right before its CPU goes - * offline. -@@ -5616,7 +5622,11 @@ void idle_task_exit(void) - current->active_mm = &init_mm; - finish_arch_post_lock_switch(); - } -- mmdrop(mm); -+ /* -+ * Defer the cleanup to an alive cpu. On RT we can neither -+ * call mmdrop() nor mmdrop_delayed() from here. 
-+ */ -+ per_cpu(idle_last_mm, smp_processor_id()) = mm; - } - - /* -@@ -5928,6 +5938,10 @@ int sched_cpu_dying(unsigned int cpu) - update_max_interval(); - nohz_balance_exit_idle(rq); - hrtick_clear(rq); -+ if (per_cpu(idle_last_mm, cpu)) { -+ mmdrop_delayed(per_cpu(idle_last_mm, cpu)); -+ per_cpu(idle_last_mm, cpu) = NULL; -+ } - return 0; - } - #endif --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/kernel/patches-4.19.x-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch deleted file mode 100644 index 8c2fd8933..000000000 --- a/kernel/patches-4.19.x-rt/0109-kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch +++ /dev/null @@ -1,69 +0,0 @@ -From 375e6c07eb8de47fab60360d59bbeb611508c54c Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 21 Nov 2016 19:31:08 +0100 -Subject: [PATCH 109/328] kernel/sched: move stack + kprobe clean up to - __put_task_struct() - -There is no need to free the stack before the task struct (except for reasons -mentioned in commit 68f24b08ee89 ("sched/core: Free the stack early if -CONFIG_THREAD_INFO_IN_TASK")). This also comes handy on -RT because we can't -free memory in preempt disabled region. - -Cc: stable-rt@vger.kernel.org #for kprobe_flush_task() -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/fork.c | 10 ++++++++++ - kernel/sched/core.c | 9 --------- - 2 files changed, 10 insertions(+), 9 deletions(-) - -diff --git a/kernel/fork.c b/kernel/fork.c -index d4ec53c72577..29b54a64daf5 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -40,6 +40,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -698,6 +699,15 @@ void __put_task_struct(struct task_struct *tsk) - WARN_ON(atomic_read(&tsk->usage)); - WARN_ON(tsk == current); - -+ /* -+ * Remove function-return probe instances associated with this -+ * task and put them back on the free list. -+ */ -+ kprobe_flush_task(tsk); -+ -+ /* Task is done with its stack. */ -+ put_task_stack(tsk); -+ - cgroup_free(tsk); - task_numa_free(tsk, true); - security_task_free(tsk); -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index cb89c90513dd..79e0d052e848 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -2741,15 +2741,6 @@ static struct rq *finish_task_switch(struct task_struct *prev) - if (prev->sched_class->task_dead) - prev->sched_class->task_dead(prev); - -- /* -- * Remove function-return probe instances associated with this -- * task and put them back on the free list. -- */ -- kprobe_flush_task(prev); -- -- /* Task is done with its stack. */ -- put_task_stack(prev); -- - put_task_struct(prev); - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch b/kernel/patches-4.19.x-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch deleted file mode 100644 index fe7a4b093..000000000 --- a/kernel/patches-4.19.x-rt/0110-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch +++ /dev/null @@ -1,107 +0,0 @@ -From 713f8459593a71106384c1a9ba33189561ea892e Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sat, 25 Jun 2011 09:21:04 +0200 -Subject: [PATCH 110/328] sched: Add saved_state for tasks blocked on sleeping - locks - -Spinlocks are state preserving in !RT. RT changes the state when a -task gets blocked on a lock. So we need to remember the state before -the lock contention. 
If a regular wakeup (not a RTmutex related -wakeup) happens, the saved_state is updated to running. When the lock -sleep is done, the saved state is restored. - -Signed-off-by: Thomas Gleixner ---- - include/linux/sched.h | 3 +++ - kernel/sched/core.c | 33 ++++++++++++++++++++++++++++++++- - kernel/sched/sched.h | 1 + - 3 files changed, 36 insertions(+), 1 deletion(-) - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 0b8850b6093b..f180bfadff33 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -600,6 +600,8 @@ struct task_struct { - #endif - /* -1 unrunnable, 0 runnable, >0 stopped: */ - volatile long state; -+ /* saved state for "spinlock sleepers" */ -+ volatile long saved_state; - - /* - * This begins the randomizable portion of task_struct. Only -@@ -1621,6 +1623,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr); - - extern int wake_up_state(struct task_struct *tsk, unsigned int state); - extern int wake_up_process(struct task_struct *tsk); -+extern int wake_up_lock_sleeper(struct task_struct *tsk); - extern void wake_up_new_task(struct task_struct *tsk); - - #ifdef CONFIG_SMP -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 79e0d052e848..d1c564acff76 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1999,8 +1999,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) - */ - raw_spin_lock_irqsave(&p->pi_lock, flags); - smp_mb__after_spinlock(); -- if (!(p->state & state)) -+ if (!(p->state & state)) { -+ /* -+ * The task might be running due to a spinlock sleeper -+ * wakeup. Check the saved state and set it to running -+ * if the wakeup condition is true. -+ */ -+ if (!(wake_flags & WF_LOCK_SLEEPER)) { -+ if (p->saved_state & state) { -+ p->saved_state = TASK_RUNNING; -+ success = 1; -+ } -+ } - goto out; -+ } -+ -+ /* -+ * If this is a regular wakeup, then we can unconditionally -+ * clear the saved state of a "lock sleeper". -+ */ -+ if (!(wake_flags & WF_LOCK_SLEEPER)) -+ p->saved_state = TASK_RUNNING; - - trace_sched_waking(p); - -@@ -2164,6 +2183,18 @@ int wake_up_process(struct task_struct *p) - } - EXPORT_SYMBOL(wake_up_process); - -+/** -+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" -+ * @p: The process to be woken up. -+ * -+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate -+ * the nature of the wakeup. 
-+ */ -+int wake_up_lock_sleeper(struct task_struct *p) -+{ -+ return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER); -+} -+ - int wake_up_state(struct task_struct *p, unsigned int state) - { - return try_to_wake_up(p, state, 0); -diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index 94bec97bd5e2..c79e32488940 100644 ---- a/kernel/sched/sched.h -+++ b/kernel/sched/sched.h -@@ -1439,6 +1439,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) - #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ - #define WF_FORK 0x02 /* Child wakeup after fork */ - #define WF_MIGRATED 0x4 /* Internal use, task got migrated */ -+#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ - - /* - * To aid in avoiding the subversion of "niceness" due to uneven distribution --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch b/kernel/patches-4.19.x-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch deleted file mode 100644 index 9a40c694b..000000000 --- a/kernel/patches-4.19.x-rt/0111-sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch +++ /dev/null @@ -1,56 +0,0 @@ -From d8deeba7aff8ef76350524de5c15b1c12563e91d Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 7 Jun 2011 09:19:06 +0200 -Subject: [PATCH 111/328] sched: Do not account rcu_preempt_depth on RT in - might_sleep() - -RT changes the rcu_preempt_depth semantics, so we cannot check for it -in might_sleep(). - -Signed-off-by: Thomas Gleixner ---- - include/linux/rcupdate.h | 7 +++++++ - kernel/sched/core.c | 2 +- - 2 files changed, 8 insertions(+), 1 deletion(-) - -diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index 68cbe111420b..027c58cdbb6e 100644 ---- a/include/linux/rcupdate.h -+++ b/include/linux/rcupdate.h -@@ -73,6 +73,11 @@ void synchronize_rcu(void); - * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
- */ - #define rcu_preempt_depth() (current->rcu_read_lock_nesting) -+#ifndef CONFIG_PREEMPT_RT_FULL -+#define sched_rcu_preempt_depth() rcu_preempt_depth() -+#else -+static inline int sched_rcu_preempt_depth(void) { return 0; } -+#endif - - #else /* #ifdef CONFIG_PREEMPT_RCU */ - -@@ -96,6 +101,8 @@ static inline int rcu_preempt_depth(void) - return 0; - } - -+#define sched_rcu_preempt_depth() rcu_preempt_depth() -+ - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - - /* Internal to kernel */ -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index d1c564acff76..59d43c084023 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -6198,7 +6198,7 @@ void __init sched_init(void) - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - static inline int preempt_count_equals(int preempt_offset) - { -- int nested = preempt_count() + rcu_preempt_depth(); -+ int nested = preempt_count() + sched_rcu_preempt_depth(); - - return (nested == preempt_offset); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch b/kernel/patches-4.19.x-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch deleted file mode 100644 index 0035821cb..000000000 --- a/kernel/patches-4.19.x-rt/0112-sched-Use-the-proper-LOCK_OFFSET-for-cond_resched.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 875b7c458428cc65b17950157a9e804383864d31 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 22:51:33 +0200 -Subject: [PATCH 112/328] sched: Use the proper LOCK_OFFSET for cond_resched() - -RT does not increment preempt count when a 'sleeping' spinlock is -locked. Update PREEMPT_LOCK_OFFSET for that case. - -Signed-off-by: Thomas Gleixner ---- - include/linux/preempt.h | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/include/linux/preempt.h b/include/linux/preempt.h -index f7a17fcc3fec..b7fe717eb1f4 100644 ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -118,7 +118,11 @@ - /* - * The preempt_count offset after spin_lock() - */ -+#if !defined(CONFIG_PREEMPT_RT_FULL) - #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET -+#else -+#define PREEMPT_LOCK_OFFSET 0 -+#endif - - /* - * The preempt_count offset needed for things like: --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch b/kernel/patches-4.19.x-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch deleted file mode 100644 index 8a0a8ff17..000000000 --- a/kernel/patches-4.19.x-rt/0113-sched-Disable-TTWU_QUEUE-on-RT.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 365d52b7071fa4453be6483864c31584995053b6 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 13 Sep 2011 16:42:35 +0200 -Subject: [PATCH 113/328] sched: Disable TTWU_QUEUE on RT - -The queued remote wakeup mechanism can introduce rather large -latencies if the number of migrated tasks is high. Disable it for RT. - -Signed-off-by: Thomas Gleixner ---- - kernel/sched/features.h | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/kernel/sched/features.h b/kernel/sched/features.h -index 85ae8488039c..68de18405857 100644 ---- a/kernel/sched/features.h -+++ b/kernel/sched/features.h -@@ -46,11 +46,16 @@ SCHED_FEAT(LB_BIAS, true) - */ - SCHED_FEAT(NONTASK_CAPACITY, true) - -+#ifdef CONFIG_PREEMPT_RT_FULL -+SCHED_FEAT(TTWU_QUEUE, false) -+#else -+ - /* - * Queue remote wakeups on the target CPU and process them - * using the scheduler IPI. Reduces rq->lock contention/bounces. 
- */ - SCHED_FEAT(TTWU_QUEUE, true) -+#endif - - /* - * When doing wakeups, attempt to limit superfluous scans of the LLC domain. --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/kernel/patches-4.19.x-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch deleted file mode 100644 index 1e950adcf..000000000 --- a/kernel/patches-4.19.x-rt/0114-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch +++ /dev/null @@ -1,44 +0,0 @@ -From eb876f743020816179d1fc4b036e540d72c771f4 Mon Sep 17 00:00:00 2001 -From: Steven Rostedt -Date: Mon, 18 Mar 2013 15:12:49 -0400 -Subject: [PATCH 114/328] sched/workqueue: Only wake up idle workers if not - blocked on sleeping spin lock - -In -rt, most spin_locks() turn into mutexes. One of these spin_lock -conversions is performed on the workqueue gcwq->lock. When the idle -worker is worken, the first thing it will do is grab that same lock and -it too will block, possibly jumping into the same code, but because -nr_running would already be decremented it prevents an infinite loop. - -But this is still a waste of CPU cycles, and it doesn't follow the method -of mainline, as new workers should only be woken when a worker thread is -truly going to sleep, and not just blocked on a spin_lock(). - -Check the saved_state too before waking up new workers. - - -Signed-off-by: Steven Rostedt -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/core.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 59d43c084023..e792543de8eb 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -3539,8 +3539,10 @@ static void __sched notrace __schedule(bool preempt) - * If a worker went to sleep, notify and ask workqueue - * whether it wants to wake up a task to maintain - * concurrency. -+ * Only call wake up if prev isn't blocked on a sleeping -+ * spin lock. - */ -- if (prev->flags & PF_WQ_WORKER) { -+ if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { - struct task_struct *to_wakeup; - - to_wakeup = wq_worker_sleeping(prev); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/kernel/patches-4.19.x-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch deleted file mode 100644 index 527c94903..000000000 --- a/kernel/patches-4.19.x-rt/0115-rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch +++ /dev/null @@ -1,161 +0,0 @@ -From d5c888e692b5c34315ee9d6c6019dc054bb3650c Mon Sep 17 00:00:00 2001 -From: Daniel Bristot de Oliveira -Date: Mon, 26 Jun 2017 17:07:15 +0200 -Subject: [PATCH 115/328] rt: Increase/decrease the nr of migratory tasks when - enabling/disabling migration - -There is a problem in the migrate_disable()/enable() implementation -regarding the number of migratory tasks in the rt/dl RQs. The problem -is the following: - -When a task is attached to the rt runqueue, it is checked if it either -can run in more than one CPU, or if it is with migration disable. If -either check is true, the rt_rq->rt_nr_migratory counter is not -increased. The counter increases otherwise. - -When the task is detached, the same check is done. If either check is -true, the rt_rq->rt_nr_migratory counter is not decreased. The counter -decreases otherwise. The same check is done in the dl scheduler. - -One important thing is that, migrate disable/enable does not touch this -counter for tasks attached to the rt rq. 
So suppose the following chain -of events. - -Assumptions: -Task A is the only runnable task in A Task B runs on the CPU B -Task A runs on CFS (non-rt) Task B has RT priority -Thus, rt_nr_migratory is 0 B is running -Task A can run on all CPUS. - -Timeline: - CPU A/TASK A CPU B/TASK B -A takes the rt mutex X . -A disables migration . - . B tries to take the rt mutex X - . As it is held by A { - . A inherits the rt priority of B - . A is dequeued from CFS RQ of CPU A - . A is enqueued in the RT RQ of CPU A - . As migration is disabled - . rt_nr_migratory in A is not increased - . -A enables migration -A releases the rt mutex X { - A returns to its original priority - A ask to be dequeued from RT RQ { - As migration is now enabled and it can run on all CPUS { - rt_nr_migratory should be decreased - As rt_nr_migratory is 0, rt_nr_migratory under flows - } -} - -This variable is important because it notifies if there are more than one -runnable & migratory task in the runqueue. If there are more than one -tasks, the rt_rq is set as overloaded, and then tries to migrate some -tasks. This rule is important to keep the scheduler working conserving, -that is, in a system with M CPUs, the M highest priority tasks should be -running. - -As rt_nr_migratory is unsigned, it will become > 0, notifying that the -RQ is overloaded, activating pushing mechanism without need. - -This patch fixes this problem by decreasing/increasing the -rt/dl_nr_migratory in the migrate disable/enable operations. - -Reported-by: Pei Zhang -Reported-by: Luiz Capitulino -Signed-off-by: Daniel Bristot de Oliveira -Cc: Luis Claudio R. Goncalves -Cc: Clark Williams -Cc: Luiz Capitulino -Cc: Sebastian Andrzej Siewior -Cc: Thomas Gleixner -Cc: Steven Rostedt -Cc: Peter Zijlstra -Cc: Ingo Molnar -Cc: LKML -Cc: linux-rt-users -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/core.c | 49 ++++++++++++++++++++++++++++++++++++++++----- - 1 file changed, 44 insertions(+), 5 deletions(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index e792543de8eb..57617777c4ba 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7185,6 +7185,47 @@ const u32 sched_prio_to_wmult[40] = { - - #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) - -+static inline void -+update_nr_migratory(struct task_struct *p, long delta) -+{ -+ if (unlikely((p->sched_class == &rt_sched_class || -+ p->sched_class == &dl_sched_class) && -+ p->nr_cpus_allowed > 1)) { -+ if (p->sched_class == &rt_sched_class) -+ task_rq(p)->rt.rt_nr_migratory += delta; -+ else -+ task_rq(p)->dl.dl_nr_migratory += delta; -+ } -+} -+ -+static inline void -+migrate_disable_update_cpus_allowed(struct task_struct *p) -+{ -+ struct rq *rq; -+ struct rq_flags rf; -+ -+ p->cpus_ptr = cpumask_of(smp_processor_id()); -+ -+ rq = task_rq_lock(p, &rf); -+ update_nr_migratory(p, -1); -+ p->nr_cpus_allowed = 1; -+ task_rq_unlock(rq, p, &rf); -+} -+ -+static inline void -+migrate_enable_update_cpus_allowed(struct task_struct *p) -+{ -+ struct rq *rq; -+ struct rq_flags rf; -+ -+ p->cpus_ptr = &p->cpus_mask; -+ -+ rq = task_rq_lock(p, &rf); -+ p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask); -+ update_nr_migratory(p, 1); -+ task_rq_unlock(rq, p, &rf); -+} -+ - void migrate_disable(void) - { - struct task_struct *p = current; -@@ -7208,10 +7249,9 @@ void migrate_disable(void) - } - - preempt_disable(); -- p->migrate_disable = 1; - -- p->cpus_ptr = cpumask_of(smp_processor_id()); -- p->nr_cpus_allowed = 1; -+ migrate_disable_update_cpus_allowed(p); -+ 
p->migrate_disable = 1; - - preempt_enable(); - } -@@ -7243,9 +7283,8 @@ void migrate_enable(void) - - preempt_disable(); - -- p->cpus_ptr = &p->cpus_mask; -- p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask); - p->migrate_disable = 0; -+ migrate_enable_update_cpus_allowed(p); - - if (p->migrate_disable_update) { - struct rq *rq; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0116-hotplug-Lightweight-get-online-cpus.patch b/kernel/patches-4.19.x-rt/0116-hotplug-Lightweight-get-online-cpus.patch deleted file mode 100644 index 4b56ddd36..000000000 --- a/kernel/patches-4.19.x-rt/0116-hotplug-Lightweight-get-online-cpus.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 54abe9a86c78a78b978f91e21929db155fc9f342 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 15 Jun 2011 12:36:06 +0200 -Subject: [PATCH 116/328] hotplug: Lightweight get online cpus - -get_online_cpus() is a heavy weight function which involves a global -mutex. migrate_disable() wants a simpler construct which prevents only -a CPU from going doing while a task is in a migrate disabled section. - -Implement a per cpu lockless mechanism, which serializes only in the -real unplug case on a global mutex. That serialization affects only -tasks on the cpu which should be brought down. - -Signed-off-by: Thomas Gleixner ---- - include/linux/cpu.h | 5 +++++ - kernel/cpu.c | 15 +++++++++++++++ - kernel/sched/core.c | 4 ++++ - 3 files changed, 24 insertions(+) - -diff --git a/include/linux/cpu.h b/include/linux/cpu.h -index aab4273810e3..e67645924404 100644 ---- a/include/linux/cpu.h -+++ b/include/linux/cpu.h -@@ -118,6 +118,8 @@ extern void cpu_hotplug_disable(void); - extern void cpu_hotplug_enable(void); - void clear_tasks_mm_cpumask(int cpu); - int cpu_down(unsigned int cpu); -+extern void pin_current_cpu(void); -+extern void unpin_current_cpu(void); - - #else /* CONFIG_HOTPLUG_CPU */ - -@@ -129,6 +131,9 @@ static inline int cpus_read_trylock(void) { return true; } - static inline void lockdep_assert_cpus_held(void) { } - static inline void cpu_hotplug_disable(void) { } - static inline void cpu_hotplug_enable(void) { } -+static inline void pin_current_cpu(void) { } -+static inline void unpin_current_cpu(void) { } -+ - #endif /* !CONFIG_HOTPLUG_CPU */ - - /* Wrappers which go away once all code is converted */ -diff --git a/kernel/cpu.c b/kernel/cpu.c -index 2d850eaaf82e..328d7bf67d2f 100644 ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -281,6 +281,21 @@ static int cpu_hotplug_disabled; - - #ifdef CONFIG_HOTPLUG_CPU - -+/** -+ * pin_current_cpu - Prevent the current cpu from being unplugged -+ */ -+void pin_current_cpu(void) -+{ -+ -+} -+ -+/** -+ * unpin_current_cpu - Allow unplug of current cpu -+ */ -+void unpin_current_cpu(void) -+{ -+} -+ - DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); - - void cpus_read_lock(void) -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 57617777c4ba..42b42ebf52bc 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7249,6 +7249,7 @@ void migrate_disable(void) - } - - preempt_disable(); -+ pin_current_cpu(); - - migrate_disable_update_cpus_allowed(p); - p->migrate_disable = 1; -@@ -7314,12 +7315,15 @@ void migrate_enable(void) - arg.task = p; - arg.dest_cpu = dest_cpu; - -+ unpin_current_cpu(); - preempt_enable(); - stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); - tlb_migrate_finish(p->mm); -+ - return; - } - } -+ unpin_current_cpu(); - preempt_enable(); - } - EXPORT_SYMBOL(migrate_enable); --- -2.25.1 - diff --git 
a/kernel/patches-4.19.x-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch b/kernel/patches-4.19.x-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch deleted file mode 100644 index e545a2e3c..000000000 --- a/kernel/patches-4.19.x-rt/0117-trace-Add-migrate-disabled-counter-to-tracing-output.patch +++ /dev/null @@ -1,85 +0,0 @@ -From b04beb8ffbac46b3e231d5032cabf43bb8638052 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 21:56:42 +0200 -Subject: [PATCH 117/328] trace: Add migrate-disabled counter to tracing output - -Signed-off-by: Thomas Gleixner ---- - include/linux/trace_events.h | 2 ++ - kernel/trace/trace.c | 9 ++++++--- - kernel/trace/trace_events.c | 2 ++ - kernel/trace/trace_output.c | 5 +++++ - 4 files changed, 15 insertions(+), 3 deletions(-) - -diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h -index 0643c083ed86..1cc4d2da954c 100644 ---- a/include/linux/trace_events.h -+++ b/include/linux/trace_events.h -@@ -62,6 +62,8 @@ struct trace_entry { - unsigned char flags; - unsigned char preempt_count; - int pid; -+ unsigned short migrate_disable; -+ unsigned short padding; - }; - - #define TRACE_EVENT_TYPE_MAX \ -diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index e61aa1c68e99..5eccf1c201db 100644 ---- a/kernel/trace/trace.c -+++ b/kernel/trace/trace.c -@@ -2149,6 +2149,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, - ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | - (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | - (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); -+ -+ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0; - } - EXPORT_SYMBOL_GPL(tracing_generic_entry_update); - -@@ -3352,9 +3354,10 @@ static void print_lat_help_header(struct seq_file *m) - "# | / _----=> need-resched \n" - "# || / _---=> hardirq/softirq \n" - "# ||| / _--=> preempt-depth \n" -- "# |||| / delay \n" -- "# cmd pid ||||| time | caller \n" -- "# \\ / ||||| \\ | / \n"); -+ "# |||| / _--=> migrate-disable\n" -+ "# ||||| / delay \n" -+ "# cmd pid |||||| time | caller \n" -+ "# \\ / ||||| \\ | / \n"); - } - - static void print_event_info(struct trace_buffer *buf, struct seq_file *m) -diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c -index ec340e1cbffc..1febb0ca4c81 100644 ---- a/kernel/trace/trace_events.c -+++ b/kernel/trace/trace_events.c -@@ -188,6 +188,8 @@ static int trace_define_common_fields(void) - __common_field(unsigned char, flags); - __common_field(unsigned char, preempt_count); - __common_field(int, pid); -+ __common_field(unsigned short, migrate_disable); -+ __common_field(unsigned short, padding); - - return ret; - } -diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c -index 6e6cc64faa38..46c96744f09d 100644 ---- a/kernel/trace/trace_output.c -+++ b/kernel/trace/trace_output.c -@@ -494,6 +494,11 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) - else - trace_seq_putc(s, '.'); - -+ if (entry->migrate_disable) -+ trace_seq_printf(s, "%x", entry->migrate_disable); -+ else -+ trace_seq_putc(s, '.'); -+ - return !trace_seq_has_overflowed(s); - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0118-lockdep-Make-it-RT-aware.patch b/kernel/patches-4.19.x-rt/0118-lockdep-Make-it-RT-aware.patch deleted file mode 100644 index e654afef3..000000000 --- a/kernel/patches-4.19.x-rt/0118-lockdep-Make-it-RT-aware.patch +++ /dev/null @@ -1,77 +0,0 @@ -From 
b65e89f6b27831323b600bce37aa38ce596bdcd3 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 18:51:23 +0200 -Subject: [PATCH 118/328] lockdep: Make it RT aware - -teach lockdep that we don't really do softirqs on -RT. - -Signed-off-by: Thomas Gleixner ---- - include/linux/irqflags.h | 23 +++++++++++++++-------- - kernel/locking/lockdep.c | 2 ++ - 2 files changed, 17 insertions(+), 8 deletions(-) - -diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h -index 21619c92c377..b20eeb25e9fa 100644 ---- a/include/linux/irqflags.h -+++ b/include/linux/irqflags.h -@@ -43,14 +43,6 @@ do { \ - do { \ - current->hardirq_context--; \ - } while (0) --# define lockdep_softirq_enter() \ --do { \ -- current->softirq_context++; \ --} while (0) --# define lockdep_softirq_exit() \ --do { \ -- current->softirq_context--; \ --} while (0) - #else - # define trace_hardirqs_on() do { } while (0) - # define trace_hardirqs_off() do { } while (0) -@@ -64,6 +56,21 @@ do { \ - # define lockdep_softirq_exit() do { } while (0) - #endif - -+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) -+# define lockdep_softirq_enter() \ -+do { \ -+ current->softirq_context++; \ -+} while (0) -+# define lockdep_softirq_exit() \ -+do { \ -+ current->softirq_context--; \ -+} while (0) -+ -+#else -+# define lockdep_softirq_enter() do { } while (0) -+# define lockdep_softirq_exit() do { } while (0) -+#endif -+ - #if defined(CONFIG_IRQSOFF_TRACER) || \ - defined(CONFIG_PREEMPT_TRACER) - extern void stop_critical_timings(void); -diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c -index 1e272f6a01e7..1938b4bfb098 100644 ---- a/kernel/locking/lockdep.c -+++ b/kernel/locking/lockdep.c -@@ -3826,6 +3826,7 @@ static void check_flags(unsigned long flags) - } - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * We dont accurately track softirq state in e.g. - * hardirq contexts (such as on 4KSTACKS), so only -@@ -3840,6 +3841,7 @@ static void check_flags(unsigned long flags) - DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); - } - } -+#endif - - if (!debug_locks) - print_irqtrace_events(current); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch b/kernel/patches-4.19.x-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch deleted file mode 100644 index e92a5bfc8..000000000 --- a/kernel/patches-4.19.x-rt/0119-tasklet-Prevent-tasklets-from-going-into-infinite-sp.patch +++ /dev/null @@ -1,302 +0,0 @@ -From 7d6dc49f4a2794dc14d0fb1bb4f669616ac12d88 Mon Sep 17 00:00:00 2001 -From: Ingo Molnar -Date: Tue, 29 Nov 2011 20:18:22 -0500 -Subject: [PATCH 119/328] tasklet: Prevent tasklets from going into infinite - spin in RT - -When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads, -and spinlocks turn are mutexes. But this can cause issues with -tasks disabling tasklets. A tasklet runs under ksoftirqd, and -if a tasklets are disabled with tasklet_disable(), the tasklet -count is increased. When a tasklet runs, it checks this counter -and if it is set, it adds itself back on the softirq queue and -returns. - -The problem arises in RT because ksoftirq will see that a softirq -is ready to run (the tasklet softirq just re-armed itself), and will -not sleep, but instead run the softirqs again. The tasklet softirq -will still see that the count is non-zero and will not execute -the tasklet and requeue itself on the softirq again, which will -cause ksoftirqd to run it again and again and again. 
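
A minimal userspace model of the livelock just described may help; the counters, names and the loop bound below are illustrative only and are not the kernel's code:

    /*
     * Model: a "tasklet" whose disable count is non-zero re-arms itself
     * every time the "softirq" pass picks it up, so the pass always finds
     * pending work again on the next iteration.
     */
    #include <stdio.h>

    static int disable_count = 1;   /* tasklet_disable() was called, never undone */
    static int pending = 1;         /* tasklet is queued */

    int main(void)
    {
            int pass;

            for (pass = 1; pass <= 5 && pending; pass++) {
                    pending = 0;                    /* dequeue */
                    if (disable_count) {
                            pending = 1;            /* still disabled: re-queue */
                            printf("pass %d: disabled, re-queued\n", pass);
                            continue;
                    }
                    printf("pass %d: tasklet ran\n", pass);
            }
            return 0;
    }

The loop is bounded here only so the model terminates; in the kernel it is ksoftirqd that keeps re-running the pass, as the rest of this changelog explains.
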
- -It gets worse because ksoftirqd runs as a real-time thread. -If it preempted the task that disabled tasklets, and that task -has migration disabled, or can't run for other reasons, the tasklet -softirq will never run because the count will never be zero, and -ksoftirqd will go into an infinite loop. As an RT task, it this -becomes a big problem. - -This is a hack solution to have tasklet_disable stop tasklets, and -when a tasklet runs, instead of requeueing the tasklet softirqd -it delays it. When tasklet_enable() is called, and tasklets are -waiting, then the tasklet_enable() will kick the tasklets to continue. -This prevents the lock up from ksoftirq going into an infinite loop. - -[ rostedt@goodmis.org: ported to 3.0-rt ] - -Signed-off-by: Ingo Molnar -Signed-off-by: Steven Rostedt -Signed-off-by: Thomas Gleixner ---- - include/linux/interrupt.h | 33 +++++----- - kernel/softirq.c | 126 ++++++++++++++++++++++++++++++++------ - 2 files changed, 125 insertions(+), 34 deletions(-) - -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index a943c07b54ba..e74936c7be48 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -542,8 +542,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void) - to be executed on some cpu at least once after this. - * If the tasklet is already scheduled, but its execution is still not - started, it will be executed only once. -- * If this tasklet is already running on another CPU (or schedule is called -- from tasklet itself), it is rescheduled for later. -+ * If this tasklet is already running on another CPU, it is rescheduled -+ for later. -+ * Schedule must not be called from the tasklet itself (a lockup occurs) - * Tasklet is strictly serialized wrt itself, but not - wrt another tasklets. If client needs some intertask synchronization, - he makes it with spinlocks. 
-@@ -568,27 +569,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } - enum - { - TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ -- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ -+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ -+ TASKLET_STATE_PENDING /* Tasklet is pending */ - }; - --#ifdef CONFIG_SMP -+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) -+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) -+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) -+ -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) - static inline int tasklet_trylock(struct tasklet_struct *t) - { - return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); - } - -+static inline int tasklet_tryunlock(struct tasklet_struct *t) -+{ -+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; -+} -+ - static inline void tasklet_unlock(struct tasklet_struct *t) - { - smp_mb__before_atomic(); - clear_bit(TASKLET_STATE_RUN, &(t)->state); - } - --static inline void tasklet_unlock_wait(struct tasklet_struct *t) --{ -- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } --} -+extern void tasklet_unlock_wait(struct tasklet_struct *t); -+ - #else - #define tasklet_trylock(t) 1 -+#define tasklet_tryunlock(t) 1 - #define tasklet_unlock_wait(t) do { } while (0) - #define tasklet_unlock(t) do { } while (0) - #endif -@@ -622,12 +632,7 @@ static inline void tasklet_disable(struct tasklet_struct *t) - smp_mb(); - } - --static inline void tasklet_enable(struct tasklet_struct *t) --{ -- smp_mb__before_atomic(); -- atomic_dec(&t->count); --} -- -+extern void tasklet_enable(struct tasklet_struct *t); - extern void tasklet_kill(struct tasklet_struct *t); - extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); - extern void tasklet_init(struct tasklet_struct *t, -diff --git a/kernel/softirq.c b/kernel/softirq.c -index 6f584861d329..1d3a482246cc 100644 ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -475,11 +476,38 @@ static void __tasklet_schedule_common(struct tasklet_struct *t, - unsigned long flags; - - local_irq_save(flags); -+ if (!tasklet_trylock(t)) { -+ local_irq_restore(flags); -+ return; -+ } -+ - head = this_cpu_ptr(headp); -- t->next = NULL; -- *head->tail = t; -- head->tail = &(t->next); -- raise_softirq_irqoff(softirq_nr); -+again: -+ /* We may have been preempted before tasklet_trylock -+ * and __tasklet_action may have already run. -+ * So double check the sched bit while the takslet -+ * is locked before adding it to the list. -+ */ -+ if (test_bit(TASKLET_STATE_SCHED, &t->state)) { -+ t->next = NULL; -+ *head->tail = t; -+ head->tail = &(t->next); -+ raise_softirq_irqoff(softirq_nr); -+ tasklet_unlock(t); -+ } else { -+ /* This is subtle. If we hit the corner case above -+ * It is possible that we get preempted right here, -+ * and another task has successfully called -+ * tasklet_schedule(), then this function, and -+ * failed on the trylock. Thus we must be sure -+ * before releasing the tasklet lock, that the -+ * SCHED_BIT is clear. 
Otherwise the tasklet -+ * may get its SCHED_BIT set, but not added to the -+ * list -+ */ -+ if (!tasklet_tryunlock(t)) -+ goto again; -+ } - local_irq_restore(flags); - } - -@@ -497,11 +525,21 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) - } - EXPORT_SYMBOL(__tasklet_hi_schedule); - -+void tasklet_enable(struct tasklet_struct *t) -+{ -+ if (!atomic_dec_and_test(&t->count)) -+ return; -+ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) -+ tasklet_schedule(t); -+} -+EXPORT_SYMBOL(tasklet_enable); -+ - static void tasklet_action_common(struct softirq_action *a, - struct tasklet_head *tl_head, - unsigned int softirq_nr) - { - struct tasklet_struct *list; -+ int loops = 1000000; - - local_irq_disable(); - list = tl_head->head; -@@ -513,25 +551,56 @@ static void tasklet_action_common(struct softirq_action *a, - struct tasklet_struct *t = list; - - list = list->next; -+ /* -+ * Should always succeed - after a tasklist got on the -+ * list (after getting the SCHED bit set from 0 to 1), -+ * nothing but the tasklet softirq it got queued to can -+ * lock it: -+ */ -+ if (!tasklet_trylock(t)) { -+ WARN_ON(1); -+ continue; -+ } - -- if (tasklet_trylock(t)) { -- if (!atomic_read(&t->count)) { -- if (!test_and_clear_bit(TASKLET_STATE_SCHED, -- &t->state)) -- BUG(); -- t->func(t->data); -+ t->next = NULL; -+ -+ if (unlikely(atomic_read(&t->count))) { -+out_disabled: -+ /* implicit unlock: */ -+ wmb(); -+ t->state = TASKLET_STATEF_PENDING; -+ continue; -+ } -+ /* -+ * After this point on the tasklet might be rescheduled -+ * on another CPU, but it can only be added to another -+ * CPU's tasklet list if we unlock the tasklet (which we -+ * dont do yet). -+ */ -+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) -+ WARN_ON(1); -+again: -+ t->func(t->data); -+ -+ while (!tasklet_tryunlock(t)) { -+ /* -+ * If it got disabled meanwhile, bail out: -+ */ -+ if (atomic_read(&t->count)) -+ goto out_disabled; -+ /* -+ * If it got scheduled meanwhile, re-execute -+ * the tasklet function: -+ */ -+ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) -+ goto again; -+ if (!--loops) { -+ printk("hm, tasklet state: %08lx\n", t->state); -+ WARN_ON(1); - tasklet_unlock(t); -- continue; -+ break; - } -- tasklet_unlock(t); - } -- -- local_irq_disable(); -- t->next = NULL; -- *tl_head->tail = t; -- tl_head->tail = &t->next; -- __raise_softirq_irqoff(softirq_nr); -- local_irq_enable(); - } - } - -@@ -563,7 +632,7 @@ void tasklet_kill(struct tasklet_struct *t) - - while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { - do { -- yield(); -+ msleep(1); - } while (test_bit(TASKLET_STATE_SCHED, &t->state)); - } - tasklet_unlock_wait(t); -@@ -637,6 +706,23 @@ void __init softirq_init(void) - open_softirq(HI_SOFTIRQ, tasklet_hi_action); - } - -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) -+void tasklet_unlock_wait(struct tasklet_struct *t) -+{ -+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { -+ /* -+ * Hack for now to avoid this busy-loop: -+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ msleep(1); -+#else -+ barrier(); -+#endif -+ } -+} -+EXPORT_SYMBOL(tasklet_unlock_wait); -+#endif -+ - static int ksoftirqd_should_run(unsigned int cpu) - { - return local_softirq_pending(); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch b/kernel/patches-4.19.x-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch deleted file mode 100644 index b753ac4d5..000000000 --- 
a/kernel/patches-4.19.x-rt/0120-softirq-Check-preemption-after-reenabling-interrupts.patch +++ /dev/null @@ -1,179 +0,0 @@ -From 4f7f72bc0ff5be0e440cf01595c7b333d8f27f70 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 13 Nov 2011 17:17:09 +0100 -Subject: [PATCH 120/328] softirq: Check preemption after reenabling interrupts - -raise_softirq_irqoff() disables interrupts and wakes the softirq -daemon, but after reenabling interrupts there is no preemption check, -so the execution of the softirq thread might be delayed arbitrarily. - -In principle we could add that check to local_irq_enable/restore, but -that's overkill as the rasie_softirq_irqoff() sections are the only -ones which show this behaviour. - -Reported-by: Carsten Emde -Signed-off-by: Thomas Gleixner ---- - block/blk-softirq.c | 3 +++ - include/linux/preempt.h | 3 +++ - lib/irq_poll.c | 5 +++++ - net/core/dev.c | 7 +++++++ - 4 files changed, 18 insertions(+) - -diff --git a/block/blk-softirq.c b/block/blk-softirq.c -index 15c1f5e12eb8..1628277885a1 100644 ---- a/block/blk-softirq.c -+++ b/block/blk-softirq.c -@@ -53,6 +53,7 @@ static void trigger_softirq(void *data) - raise_softirq_irqoff(BLOCK_SOFTIRQ); - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - - /* -@@ -91,6 +92,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu) - this_cpu_ptr(&blk_cpu_done)); - raise_softirq_irqoff(BLOCK_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - - return 0; - } -@@ -143,6 +145,7 @@ void __blk_complete_request(struct request *req) - goto do_local; - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(__blk_complete_request); - -diff --git a/include/linux/preempt.h b/include/linux/preempt.h -index b7fe717eb1f4..9984f2b75b73 100644 ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -187,8 +187,10 @@ do { \ - - #ifdef CONFIG_PREEMPT_RT_BASE - # define preempt_enable_no_resched() sched_preempt_enable_no_resched() -+# define preempt_check_resched_rt() preempt_check_resched() - #else - # define preempt_enable_no_resched() preempt_enable() -+# define preempt_check_resched_rt() barrier(); - #endif - - #define preemptible() (preempt_count() == 0 && !irqs_disabled()) -@@ -275,6 +277,7 @@ do { \ - #define preempt_disable_notrace() barrier() - #define preempt_enable_no_resched_notrace() barrier() - #define preempt_enable_notrace() barrier() -+#define preempt_check_resched_rt() barrier() - #define preemptible() 0 - - #define migrate_disable() barrier() -diff --git a/lib/irq_poll.c b/lib/irq_poll.c -index 86a709954f5a..9c069ef83d6d 100644 ---- a/lib/irq_poll.c -+++ b/lib/irq_poll.c -@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop) - list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); - __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(irq_poll_sched); - -@@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *iop) - local_irq_save(flags); - __irq_poll_complete(iop); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(irq_poll_complete); - -@@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h) - } - - local_irq_enable(); -+ preempt_check_resched_rt(); - - /* Even though interrupts have been re-enabled, this - * access is safe because interrupts can only add new -@@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h) - __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); - - local_irq_enable(); -+ 
preempt_check_resched_rt(); - } - - /** -@@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned int cpu) - this_cpu_ptr(&blk_cpu_iopoll)); - __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - - return 0; - } -diff --git a/net/core/dev.c b/net/core/dev.c -index c1a3baf16957..45ff62d35a1f 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -2727,6 +2727,7 @@ static void __netif_reschedule(struct Qdisc *q) - sd->output_queue_tailp = &q->next_sched; - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - - void __netif_schedule(struct Qdisc *q) -@@ -2789,6 +2790,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) - __this_cpu_write(softnet_data.completion_queue, skb); - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(__dev_kfree_skb_irq); - -@@ -4261,6 +4263,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, - rps_unlock(sd); - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - - atomic_long_inc(&skb->dev->rx_dropped); - kfree_skb(skb); -@@ -5801,12 +5804,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) - sd->rps_ipi_list = NULL; - - local_irq_enable(); -+ preempt_check_resched_rt(); - - /* Send pending IPI's to kick RPS processing on remote cpus. */ - net_rps_send_ipi(remsd); - } else - #endif - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) -@@ -5884,6 +5889,7 @@ void __napi_schedule(struct napi_struct *n) - local_irq_save(flags); - ____napi_schedule(this_cpu_ptr(&softnet_data), n); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(__napi_schedule); - -@@ -9486,6 +9492,7 @@ static int dev_cpu_dead(unsigned int oldcpu) - - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - - #ifdef CONFIG_RPS - remsd = oldsd->rps_ipi_list; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch b/kernel/patches-4.19.x-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch deleted file mode 100644 index 5990b2d37..000000000 --- a/kernel/patches-4.19.x-rt/0121-softirq-Disable-softirq-stacks-for-RT.patch +++ /dev/null @@ -1,176 +0,0 @@ -From 72daa94331fe79e1c1b596147ec920920f1cd87a Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Mon, 18 Jul 2011 13:59:17 +0200 -Subject: [PATCH 121/328] softirq: Disable softirq stacks for RT - -Disable extra stacks for softirqs. We want to preempt softirqs and -having them on special IRQ-stack does not make this easier. 
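
For orientation, the net effect on the generic header (this is the include/linux/interrupt.h hunk at the end of this patch, shown here in its resulting form) is that with PREEMPT_RT_FULL the arch stack-switching helper is compiled out and softirqs run on the current task's stack:

    #if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT_FULL)
    void do_softirq_own_stack(void);
    #else
    static inline void do_softirq_own_stack(void)
    {
            __do_softirq();
    }
    #endif
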
- -Signed-off-by: Thomas Gleixner ---- - arch/powerpc/kernel/irq.c | 2 ++ - arch/powerpc/kernel/misc_32.S | 2 ++ - arch/powerpc/kernel/misc_64.S | 2 ++ - arch/sh/kernel/irq.c | 2 ++ - arch/sparc/kernel/irq_64.c | 2 ++ - arch/x86/entry/entry_64.S | 2 ++ - arch/x86/kernel/irq_32.c | 2 ++ - include/linux/interrupt.h | 2 +- - 8 files changed, 15 insertions(+), 1 deletion(-) - -diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c -index d37704ebccdb..b45a9849b687 100644 ---- a/arch/powerpc/kernel/irq.c -+++ b/arch/powerpc/kernel/irq.c -@@ -766,6 +766,7 @@ void irq_ctx_init(void) - } - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - struct thread_info *curtp, *irqtp; -@@ -783,6 +784,7 @@ void do_softirq_own_stack(void) - if (irqtp->flags) - set_bits(irqtp->flags, &curtp->flags); - } -+#endif - - irq_hw_number_t virq_to_hw(unsigned int virq) - { -diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S -index 695b24a2d954..032ada21b7bd 100644 ---- a/arch/powerpc/kernel/misc_32.S -+++ b/arch/powerpc/kernel/misc_32.S -@@ -42,6 +42,7 @@ - * We store the saved ksp_limit in the unused part - * of the STACK_FRAME_OVERHEAD - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - _GLOBAL(call_do_softirq) - mflr r0 - stw r0,4(r1) -@@ -58,6 +59,7 @@ _GLOBAL(call_do_softirq) - stw r10,THREAD+KSP_LIMIT(r2) - mtlr r0 - blr -+#endif - - /* - * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); -diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S -index facc02964ab3..8b1774186c68 100644 ---- a/arch/powerpc/kernel/misc_64.S -+++ b/arch/powerpc/kernel/misc_64.S -@@ -32,6 +32,7 @@ - - .text - -+#ifndef CONFIG_PREEMPT_RT_FULL - _GLOBAL(call_do_softirq) - mflr r0 - std r0,16(r1) -@@ -42,6 +43,7 @@ _GLOBAL(call_do_softirq) - ld r0,16(r1) - mtlr r0 - blr -+#endif - - _GLOBAL(call_do_irq) - mflr r0 -diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c -index 5717c7cbdd97..66dd399b2007 100644 ---- a/arch/sh/kernel/irq.c -+++ b/arch/sh/kernel/irq.c -@@ -148,6 +148,7 @@ void irq_ctx_exit(int cpu) - hardirq_ctx[cpu] = NULL; - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - struct thread_info *curctx; -@@ -175,6 +176,7 @@ void do_softirq_own_stack(void) - "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" - ); - } -+#endif - #else - static inline void handle_one_irq(unsigned int irq) - { -diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c -index 713670e6d13d..5dfc715343f9 100644 ---- a/arch/sparc/kernel/irq_64.c -+++ b/arch/sparc/kernel/irq_64.c -@@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) - set_irq_regs(old_regs); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - void *orig_sp, *sp = softirq_stack[smp_processor_id()]; -@@ -868,6 +869,7 @@ void do_softirq_own_stack(void) - __asm__ __volatile__("mov %0, %%sp" - : : "r" (orig_sp)); - } -+#endif - - #ifdef CONFIG_HOTPLUG_CPU - void fixup_irqs(void) -diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S -index ccb5e3486aee..7ffd83c57ef2 100644 ---- a/arch/x86/entry/entry_64.S -+++ b/arch/x86/entry/entry_64.S -@@ -1083,6 +1083,7 @@ bad_gs: - jmp 2b - .previous - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* Call softirq on interrupt stack. Interrupts are off. 
*/ - ENTRY(do_softirq_own_stack) - pushq %rbp -@@ -1093,6 +1094,7 @@ ENTRY(do_softirq_own_stack) - leaveq - ret - ENDPROC(do_softirq_own_stack) -+#endif - - #ifdef CONFIG_XEN - idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 -diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c -index 95600a99ae93..9192d76085ba 100644 ---- a/arch/x86/kernel/irq_32.c -+++ b/arch/x86/kernel/irq_32.c -@@ -130,6 +130,7 @@ void irq_ctx_init(int cpu) - cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - struct irq_stack *irqstk; -@@ -146,6 +147,7 @@ void do_softirq_own_stack(void) - - call_on_stack(__do_softirq, isp); - } -+#endif - - bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) - { -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index e74936c7be48..cb2d1384cb0d 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -506,7 +506,7 @@ struct softirq_action - asmlinkage void do_softirq(void); - asmlinkage void __do_softirq(void); - --#ifdef __ARCH_HAS_DO_SOFTIRQ -+#if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT_FULL) - void do_softirq_own_stack(void); - #else - static inline void do_softirq_own_stack(void) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0122-softirq-Split-softirq-locks.patch b/kernel/patches-4.19.x-rt/0122-softirq-Split-softirq-locks.patch deleted file mode 100644 index f77e66b2b..000000000 --- a/kernel/patches-4.19.x-rt/0122-softirq-Split-softirq-locks.patch +++ /dev/null @@ -1,831 +0,0 @@ -From d9b80fda665aeecc128e15d7771a29dcf01f3e36 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 4 Oct 2012 14:20:47 +0100 -Subject: [PATCH 122/328] softirq: Split softirq locks - -The 3.x RT series removed the split softirq implementation in favour -of pushing softirq processing into the context of the thread which -raised it. Though this prevents us from handling the various softirqs -at different priorities. Now instead of reintroducing the split -softirq threads we split the locks which serialize the softirq -processing. - -If a softirq is raised in context of a thread, then the softirq is -noted on a per thread field, if the thread is in a bh disabled -region. If the softirq is raised from hard interrupt context, then the -bit is set in the flag field of ksoftirqd and ksoftirqd is invoked. -When a thread leaves a bh disabled region, then it tries to execute -the softirqs which have been raised in its own context. It acquires -the per softirq / per cpu lock for the softirq and then checks, -whether the softirq is still pending in the per cpu -local_softirq_pending() field. If yes, it runs the softirq. If no, -then some other task executed it already. This allows for zero config -softirq elevation in the context of user space tasks or interrupt -threads. 
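
A compressed userspace model of that flow, with the per-softirq locks and the hard-interrupt path left out (both are in the hunks below), might look like this; all names are illustrative and not the kernel's:

    #include <stdio.h>

    static unsigned int softirqs_raised;    /* models task->softirqs_raised */
    static int softirq_nestcnt;             /* models task->softirq_nestcnt */

    static void model_raise_softirq(unsigned int nr)
    {
            if (softirq_nestcnt)
                    softirqs_raised |= 1u << nr;    /* noted on current, run on enable */
            else
                    printf("softirq %u: wake ksoftirqd\n", nr);
    }

    static void model_local_bh_enable(void)
    {
            while (softirqs_raised) {               /* drain what this task raised */
                    unsigned int nr = __builtin_ctz(softirqs_raised);

                    softirqs_raised &= ~(1u << nr);
                    printf("softirq %u: run in the raising task's context\n", nr);
            }
            softirq_nestcnt--;
    }

    int main(void)
    {
            softirq_nestcnt++;              /* local_bh_disable() */
            model_raise_softirq(3);         /* raised inside a bh-disabled region */
            model_local_bh_enable();        /* executed here, no ksoftirqd involved */
            model_raise_softirq(1);         /* outside a bh-disabled region: delegated */
            return 0;
    }
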
- -Signed-off-by: Thomas Gleixner ---- - include/linux/bottom_half.h | 34 +++ - include/linux/interrupt.h | 15 +- - include/linux/preempt.h | 15 +- - include/linux/sched.h | 3 + - init/main.c | 1 + - kernel/softirq.c | 491 ++++++++++++++++++++++++++++++------ - kernel/time/tick-sched.c | 9 +- - 7 files changed, 478 insertions(+), 90 deletions(-) - -diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h -index a19519f4241d..40dd5ef9c154 100644 ---- a/include/linux/bottom_half.h -+++ b/include/linux/bottom_half.h -@@ -4,6 +4,39 @@ - - #include - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+extern void __local_bh_disable(void); -+extern void _local_bh_enable(void); -+extern void __local_bh_enable(void); -+ -+static inline void local_bh_disable(void) -+{ -+ __local_bh_disable(); -+} -+ -+static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) -+{ -+ __local_bh_disable(); -+} -+ -+static inline void local_bh_enable(void) -+{ -+ __local_bh_enable(); -+} -+ -+static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) -+{ -+ __local_bh_enable(); -+} -+ -+static inline void local_bh_enable_ip(unsigned long ip) -+{ -+ __local_bh_enable(); -+} -+ -+#else -+ - #ifdef CONFIG_TRACE_IRQFLAGS - extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); - #else -@@ -31,5 +64,6 @@ static inline void local_bh_enable(void) - { - __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); - } -+#endif - - #endif /* _LINUX_BH_H */ -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index cb2d1384cb0d..6c25b962ba89 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -503,10 +503,11 @@ struct softirq_action - void (*action)(struct softirq_action *); - }; - -+#ifndef CONFIG_PREEMPT_RT_FULL - asmlinkage void do_softirq(void); - asmlinkage void __do_softirq(void); -- --#if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT_FULL) -+static inline void thread_do_softirq(void) { do_softirq(); } -+#ifdef __ARCH_HAS_DO_SOFTIRQ - void do_softirq_own_stack(void); - #else - static inline void do_softirq_own_stack(void) -@@ -514,6 +515,9 @@ static inline void do_softirq_own_stack(void) - __do_softirq(); - } - #endif -+#else -+extern void thread_do_softirq(void); -+#endif - - extern void open_softirq(int nr, void (*action)(struct softirq_action *)); - extern void softirq_init(void); -@@ -521,6 +525,7 @@ extern void __raise_softirq_irqoff(unsigned int nr); - - extern void raise_softirq_irqoff(unsigned int nr); - extern void raise_softirq(unsigned int nr); -+extern void softirq_check_pending_idle(void); - - DECLARE_PER_CPU(struct task_struct *, ksoftirqd); - -@@ -638,6 +643,12 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); - extern void tasklet_init(struct tasklet_struct *t, - void (*func)(unsigned long), unsigned long data); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+extern void softirq_early_init(void); -+#else -+static inline void softirq_early_init(void) { } -+#endif -+ - struct tasklet_hrtimer { - struct hrtimer timer; - struct tasklet_struct tasklet; -diff --git a/include/linux/preempt.h b/include/linux/preempt.h -index 9984f2b75b73..27c3176d88d2 100644 ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -51,7 +51,11 @@ - #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) - #define NMI_OFFSET (1UL << NMI_SHIFT) - --#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -+#else -+# define 
SOFTIRQ_DISABLE_OFFSET (0) -+#endif - - /* We use the MSB mostly because its available */ - #define PREEMPT_NEED_RESCHED 0x80000000 -@@ -81,9 +85,15 @@ - #include - - #define hardirq_count() (preempt_count() & HARDIRQ_MASK) --#define softirq_count() (preempt_count() & SOFTIRQ_MASK) - #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ - | NMI_MASK)) -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define softirq_count() (preempt_count() & SOFTIRQ_MASK) -+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) -+#else -+# define softirq_count() ((unsigned long)current->softirq_nestcnt) -+extern int in_serving_softirq(void); -+#endif - - /* - * Are we doing bottom half or hardware interrupt processing? -@@ -101,7 +111,6 @@ - #define in_irq() (hardirq_count()) - #define in_softirq() (softirq_count()) - #define in_interrupt() (irq_count()) --#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) - #define in_nmi() (preempt_count() & NMI_MASK) - #define in_task() (!(preempt_count() & \ - (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) -diff --git a/include/linux/sched.h b/include/linux/sched.h -index f180bfadff33..f4ff928e6be3 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1198,6 +1198,8 @@ struct task_struct { - #endif - #ifdef CONFIG_PREEMPT_RT_BASE - struct rcu_head put_rcu; -+ int softirq_nestcnt; -+ unsigned int softirqs_raised; - #endif - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; -@@ -1395,6 +1397,7 @@ extern struct pid *cad_pid; - /* - * Per process flags - */ -+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ - #define PF_IDLE 0x00000002 /* I am an IDLE thread */ - #define PF_EXITING 0x00000004 /* Getting shut down */ - #define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */ -diff --git a/init/main.c b/init/main.c -index 38a603f62b7b..6e02188386a7 100644 ---- a/init/main.c -+++ b/init/main.c -@@ -560,6 +560,7 @@ asmlinkage __visible void __init start_kernel(void) - setup_command_line(command_line); - setup_nr_cpu_ids(); - setup_per_cpu_areas(); -+ softirq_early_init(); - smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ - boot_cpu_hotplug_init(); - -diff --git a/kernel/softirq.c b/kernel/softirq.c -index 1d3a482246cc..fd89f8ab85ac 100644 ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -26,7 +26,9 @@ - #include - #include - #include -+#include - #include -+#include - - #define CREATE_TRACE_POINTS - #include -@@ -63,6 +65,98 @@ const char * const softirq_to_name[NR_SOFTIRQS] = { - "TASKLET", "SCHED", "HRTIMER", "RCU" - }; - -+#ifdef CONFIG_NO_HZ_COMMON -+# ifdef CONFIG_PREEMPT_RT_FULL -+ -+struct softirq_runner { -+ struct task_struct *runner[NR_SOFTIRQS]; -+}; -+ -+static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); -+ -+static inline void softirq_set_runner(unsigned int sirq) -+{ -+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); -+ -+ sr->runner[sirq] = current; -+} -+ -+static inline void softirq_clr_runner(unsigned int sirq) -+{ -+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); -+ -+ sr->runner[sirq] = NULL; -+} -+ -+/* -+ * On preempt-rt a softirq running context might be blocked on a -+ * lock. There might be no other runnable task on this CPU because the -+ * lock owner runs on some other CPU. So we have to go into idle with -+ * the pending bit set. Therefor we need to check this otherwise we -+ * warn about false positives which confuses users and defeats the -+ * whole purpose of this test. 
-+ * -+ * This code is called with interrupts disabled. -+ */ -+void softirq_check_pending_idle(void) -+{ -+ static int rate_limit; -+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); -+ u32 warnpending; -+ int i; -+ -+ if (rate_limit >= 10) -+ return; -+ -+ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; -+ for (i = 0; i < NR_SOFTIRQS; i++) { -+ struct task_struct *tsk = sr->runner[i]; -+ -+ /* -+ * The wakeup code in rtmutex.c wakes up the task -+ * _before_ it sets pi_blocked_on to NULL under -+ * tsk->pi_lock. So we need to check for both: state -+ * and pi_blocked_on. -+ */ -+ if (tsk) { -+ raw_spin_lock(&tsk->pi_lock); -+ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { -+ /* Clear all bits pending in that task */ -+ warnpending &= ~(tsk->softirqs_raised); -+ warnpending &= ~(1 << i); -+ } -+ raw_spin_unlock(&tsk->pi_lock); -+ } -+ } -+ -+ if (warnpending) { -+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", -+ warnpending); -+ rate_limit++; -+ } -+} -+# else -+/* -+ * On !PREEMPT_RT we just printk rate limited: -+ */ -+void softirq_check_pending_idle(void) -+{ -+ static int rate_limit; -+ -+ if (rate_limit < 10 && -+ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { -+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", -+ local_softirq_pending()); -+ rate_limit++; -+ } -+} -+# endif -+ -+#else /* !CONFIG_NO_HZ_COMMON */ -+static inline void softirq_set_runner(unsigned int sirq) { } -+static inline void softirq_clr_runner(unsigned int sirq) { } -+#endif -+ - /* - * we cannot loop indefinitely here to avoid userspace starvation, - * but we also don't want to introduce a worst case 1/HZ latency -@@ -78,6 +172,27 @@ static void wakeup_softirqd(void) - wake_up_process(tsk); - } - -+static void handle_softirq(unsigned int vec_nr) -+{ -+ struct softirq_action *h = softirq_vec + vec_nr; -+ int prev_count; -+ -+ prev_count = preempt_count(); -+ -+ kstat_incr_softirqs_this_cpu(vec_nr); -+ -+ trace_softirq_entry(vec_nr); -+ h->action(h); -+ trace_softirq_exit(vec_nr); -+ if (unlikely(prev_count != preempt_count())) { -+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", -+ vec_nr, softirq_to_name[vec_nr], h->action, -+ prev_count, preempt_count()); -+ preempt_count_set(prev_count); -+ } -+} -+ -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * If ksoftirqd is scheduled, we do not want to process pending softirqs - * right now. 
Let ksoftirqd handle this at its own rate, to get fairness, -@@ -93,6 +208,47 @@ static bool ksoftirqd_running(unsigned long pending) - return tsk && (tsk->state == TASK_RUNNING); - } - -+static inline int ksoftirqd_softirq_pending(void) -+{ -+ return local_softirq_pending(); -+} -+ -+static void handle_pending_softirqs(u32 pending) -+{ -+ struct softirq_action *h = softirq_vec; -+ int softirq_bit; -+ -+ local_irq_enable(); -+ -+ h = softirq_vec; -+ -+ while ((softirq_bit = ffs(pending))) { -+ unsigned int vec_nr; -+ -+ h += softirq_bit - 1; -+ vec_nr = h - softirq_vec; -+ handle_softirq(vec_nr); -+ -+ h++; -+ pending >>= softirq_bit; -+ } -+ -+ rcu_bh_qs(); -+ local_irq_disable(); -+} -+ -+static void run_ksoftirqd(unsigned int cpu) -+{ -+ local_irq_disable(); -+ if (ksoftirqd_softirq_pending()) { -+ __do_softirq(); -+ local_irq_enable(); -+ cond_resched(); -+ return; -+ } -+ local_irq_enable(); -+} -+ - /* - * preempt_count and SOFTIRQ_OFFSET usage: - * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving -@@ -252,10 +408,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) - unsigned long end = jiffies + MAX_SOFTIRQ_TIME; - unsigned long old_flags = current->flags; - int max_restart = MAX_SOFTIRQ_RESTART; -- struct softirq_action *h; - bool in_hardirq; - __u32 pending; -- int softirq_bit; - - /* - * Mask out PF_MEMALLOC s current task context is borrowed for the -@@ -274,36 +428,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) - /* Reset the pending bitmask before enabling irqs */ - set_softirq_pending(0); - -- local_irq_enable(); -- -- h = softirq_vec; -- -- while ((softirq_bit = ffs(pending))) { -- unsigned int vec_nr; -- int prev_count; -- -- h += softirq_bit - 1; -- -- vec_nr = h - softirq_vec; -- prev_count = preempt_count(); -- -- kstat_incr_softirqs_this_cpu(vec_nr); -- -- trace_softirq_entry(vec_nr); -- h->action(h); -- trace_softirq_exit(vec_nr); -- if (unlikely(prev_count != preempt_count())) { -- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", -- vec_nr, softirq_to_name[vec_nr], h->action, -- prev_count, preempt_count()); -- preempt_count_set(prev_count); -- } -- h++; -- pending >>= softirq_bit; -- } -- -- rcu_bh_qs(); -- local_irq_disable(); -+ handle_pending_softirqs(pending); - - pending = local_softirq_pending(); - if (pending) { -@@ -339,6 +464,248 @@ asmlinkage __visible void do_softirq(void) - local_irq_restore(flags); - } - -+/* -+ * This function must run with irqs disabled! -+ */ -+void raise_softirq_irqoff(unsigned int nr) -+{ -+ __raise_softirq_irqoff(nr); -+ -+ /* -+ * If we're in an interrupt or softirq, we're done -+ * (this also catches softirq-disabled code). We will -+ * actually run the softirq once we return from -+ * the irq or softirq. -+ * -+ * Otherwise we wake up ksoftirqd to make sure we -+ * schedule the softirq soon. 
-+ */ -+ if (!in_interrupt()) -+ wakeup_softirqd(); -+} -+ -+void __raise_softirq_irqoff(unsigned int nr) -+{ -+ trace_softirq_raise(nr); -+ or_softirq_pending(1UL << nr); -+} -+ -+static inline void local_bh_disable_nort(void) { local_bh_disable(); } -+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } -+static void ksoftirqd_set_sched_params(unsigned int cpu) { } -+static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } -+ -+#else /* !PREEMPT_RT_FULL */ -+ -+/* -+ * On RT we serialize softirq execution with a cpu local lock per softirq -+ */ -+static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); -+ -+void __init softirq_early_init(void) -+{ -+ int i; -+ -+ for (i = 0; i < NR_SOFTIRQS; i++) -+ local_irq_lock_init(local_softirq_locks[i]); -+} -+ -+static void lock_softirq(int which) -+{ -+ local_lock(local_softirq_locks[which]); -+} -+ -+static void unlock_softirq(int which) -+{ -+ local_unlock(local_softirq_locks[which]); -+} -+ -+static void do_single_softirq(int which) -+{ -+ unsigned long old_flags = current->flags; -+ -+ current->flags &= ~PF_MEMALLOC; -+ vtime_account_irq_enter(current); -+ current->flags |= PF_IN_SOFTIRQ; -+ lockdep_softirq_enter(); -+ local_irq_enable(); -+ handle_softirq(which); -+ local_irq_disable(); -+ lockdep_softirq_exit(); -+ current->flags &= ~PF_IN_SOFTIRQ; -+ vtime_account_irq_enter(current); -+ current_restore_flags(old_flags, PF_MEMALLOC); -+} -+ -+/* -+ * Called with interrupts disabled. Process softirqs which were raised -+ * in current context (or on behalf of ksoftirqd). -+ */ -+static void do_current_softirqs(void) -+{ -+ while (current->softirqs_raised) { -+ int i = __ffs(current->softirqs_raised); -+ unsigned int pending, mask = (1U << i); -+ -+ current->softirqs_raised &= ~mask; -+ local_irq_enable(); -+ -+ /* -+ * If the lock is contended, we boost the owner to -+ * process the softirq or leave the critical section -+ * now. -+ */ -+ lock_softirq(i); -+ local_irq_disable(); -+ softirq_set_runner(i); -+ /* -+ * Check with the local_softirq_pending() bits, -+ * whether we need to process this still or if someone -+ * else took care of it. -+ */ -+ pending = local_softirq_pending(); -+ if (pending & mask) { -+ set_softirq_pending(pending & ~mask); -+ do_single_softirq(i); -+ } -+ softirq_clr_runner(i); -+ WARN_ON(current->softirq_nestcnt != 1); -+ local_irq_enable(); -+ unlock_softirq(i); -+ local_irq_disable(); -+ } -+} -+ -+void __local_bh_disable(void) -+{ -+ if (++current->softirq_nestcnt == 1) -+ migrate_disable(); -+} -+EXPORT_SYMBOL(__local_bh_disable); -+ -+void __local_bh_enable(void) -+{ -+ if (WARN_ON(current->softirq_nestcnt == 0)) -+ return; -+ -+ local_irq_disable(); -+ if (current->softirq_nestcnt == 1 && current->softirqs_raised) -+ do_current_softirqs(); -+ local_irq_enable(); -+ -+ if (--current->softirq_nestcnt == 0) -+ migrate_enable(); -+} -+EXPORT_SYMBOL(__local_bh_enable); -+ -+int in_serving_softirq(void) -+{ -+ return current->flags & PF_IN_SOFTIRQ; -+} -+EXPORT_SYMBOL(in_serving_softirq); -+ -+/* Called with preemption disabled */ -+static void run_ksoftirqd(unsigned int cpu) -+{ -+ local_irq_disable(); -+ current->softirq_nestcnt++; -+ -+ do_current_softirqs(); -+ current->softirq_nestcnt--; -+ local_irq_enable(); -+ cond_resched(); -+} -+ -+/* -+ * Called from netif_rx_ni(). Preemption enabled, but migration -+ * disabled. So the cpu can't go away under us. 
-+ */ -+void thread_do_softirq(void) -+{ -+ if (!in_serving_softirq() && current->softirqs_raised) { -+ current->softirq_nestcnt++; -+ do_current_softirqs(); -+ current->softirq_nestcnt--; -+ } -+} -+ -+static void do_raise_softirq_irqoff(unsigned int nr) -+{ -+ trace_softirq_raise(nr); -+ or_softirq_pending(1UL << nr); -+ -+ /* -+ * If we are not in a hard interrupt and inside a bh disabled -+ * region, we simply raise the flag on current. local_bh_enable() -+ * will make sure that the softirq is executed. Otherwise we -+ * delegate it to ksoftirqd. -+ */ -+ if (!in_irq() && current->softirq_nestcnt) -+ current->softirqs_raised |= (1U << nr); -+ else if (__this_cpu_read(ksoftirqd)) -+ __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); -+} -+ -+void __raise_softirq_irqoff(unsigned int nr) -+{ -+ do_raise_softirq_irqoff(nr); -+ if (!in_irq() && !current->softirq_nestcnt) -+ wakeup_softirqd(); -+} -+ -+/* -+ * This function must run with irqs disabled! -+ */ -+void raise_softirq_irqoff(unsigned int nr) -+{ -+ do_raise_softirq_irqoff(nr); -+ -+ /* -+ * If we're in an hard interrupt we let irq return code deal -+ * with the wakeup of ksoftirqd. -+ */ -+ if (in_irq()) -+ return; -+ /* -+ * If we are in thread context but outside of a bh disabled -+ * region, we need to wake ksoftirqd as well. -+ * -+ * CHECKME: Some of the places which do that could be wrapped -+ * into local_bh_disable/enable pairs. Though it's unclear -+ * whether this is worth the effort. To find those places just -+ * raise a WARN() if the condition is met. -+ */ -+ if (!current->softirq_nestcnt) -+ wakeup_softirqd(); -+} -+ -+static inline int ksoftirqd_softirq_pending(void) -+{ -+ return current->softirqs_raised; -+} -+ -+static inline void local_bh_disable_nort(void) { } -+static inline void _local_bh_enable_nort(void) { } -+ -+static inline void ksoftirqd_set_sched_params(unsigned int cpu) -+{ -+ struct sched_param param = { .sched_priority = 1 }; -+ -+ sched_setscheduler(current, SCHED_FIFO, ¶m); -+ /* Take over all pending softirqs when starting */ -+ local_irq_disable(); -+ current->softirqs_raised = local_softirq_pending(); -+ local_irq_enable(); -+} -+ -+static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) -+{ -+ struct sched_param param = { .sched_priority = 0 }; -+ -+ sched_setscheduler(current, SCHED_NORMAL, ¶m); -+} -+ -+#endif /* PREEMPT_RT_FULL */ - /* - * Enter an interrupt context. - */ -@@ -350,9 +717,9 @@ void irq_enter(void) - * Prevent raise_softirq from needlessly waking up ksoftirqd - * here, as softirq will be serviced on return from interrupt. - */ -- local_bh_disable(); -+ local_bh_disable_nort(); - tick_irq_enter(); -- _local_bh_enable(); -+ _local_bh_enable_nort(); - } - - __irq_enter(); -@@ -360,6 +727,7 @@ void irq_enter(void) - - static inline void invoke_softirq(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - if (ksoftirqd_running(local_softirq_pending())) - return; - -@@ -382,6 +750,15 @@ static inline void invoke_softirq(void) - } else { - wakeup_softirqd(); - } -+#else /* PREEMPT_RT_FULL */ -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ if (__this_cpu_read(ksoftirqd) && -+ __this_cpu_read(ksoftirqd)->softirqs_raised) -+ wakeup_softirqd(); -+ local_irq_restore(flags); -+#endif - } - - static inline void tick_irq_exit(void) -@@ -417,26 +794,6 @@ void irq_exit(void) - trace_hardirq_exit(); /* must be last! */ - } - --/* -- * This function must run with irqs disabled! 
-- */ --inline void raise_softirq_irqoff(unsigned int nr) --{ -- __raise_softirq_irqoff(nr); -- -- /* -- * If we're in an interrupt or softirq, we're done -- * (this also catches softirq-disabled code). We will -- * actually run the softirq once we return from -- * the irq or softirq. -- * -- * Otherwise we wake up ksoftirqd to make sure we -- * schedule the softirq soon. -- */ -- if (!in_interrupt()) -- wakeup_softirqd(); --} -- - void raise_softirq(unsigned int nr) - { - unsigned long flags; -@@ -446,12 +803,6 @@ void raise_softirq(unsigned int nr) - local_irq_restore(flags); - } - --void __raise_softirq_irqoff(unsigned int nr) --{ -- trace_softirq_raise(nr); -- or_softirq_pending(1UL << nr); --} -- - void open_softirq(int nr, void (*action)(struct softirq_action *)) - { - softirq_vec[nr].action = action; -@@ -725,23 +1076,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait); - - static int ksoftirqd_should_run(unsigned int cpu) - { -- return local_softirq_pending(); --} -- --static void run_ksoftirqd(unsigned int cpu) --{ -- local_irq_disable(); -- if (local_softirq_pending()) { -- /* -- * We can safely run softirq on inline stack, as we are not deep -- * in the task stack here. -- */ -- __do_softirq(); -- local_irq_enable(); -- cond_resched(); -- return; -- } -- local_irq_enable(); -+ return ksoftirqd_softirq_pending(); - } - - #ifdef CONFIG_HOTPLUG_CPU -@@ -808,6 +1143,8 @@ static int takeover_tasklets(unsigned int cpu) - - static struct smp_hotplug_thread softirq_threads = { - .store = &ksoftirqd, -+ .setup = ksoftirqd_set_sched_params, -+ .cleanup = ksoftirqd_clr_sched_params, - .thread_should_run = ksoftirqd_should_run, - .thread_fn = run_ksoftirqd, - .thread_comm = "ksoftirqd/%u", -diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index 012bc81879bf..2b0ddd50e879 100644 ---- a/kernel/time/tick-sched.c -+++ b/kernel/time/tick-sched.c -@@ -895,14 +895,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) - return false; - - if (unlikely(local_softirq_pending() && cpu_online(cpu))) { -- static int ratelimit; -- -- if (ratelimit < 10 && -- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { -- pr_warn("NOHZ: local_softirq_pending %02x\n", -- (unsigned int) local_softirq_pending()); -- ratelimit++; -- } -+ softirq_check_pending_idle(); - return false; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch b/kernel/patches-4.19.x-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch deleted file mode 100644 index 4459a09ba..000000000 --- a/kernel/patches-4.19.x-rt/0123-net-core-use-local_bh_disable-in-netif_rx_ni.patch +++ /dev/null @@ -1,40 +0,0 @@ -From afeabb59361466c6e88b4e95603508d7375f4aeb Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 16 Jun 2017 19:03:16 +0200 -Subject: [PATCH 123/328] net/core: use local_bh_disable() in netif_rx_ni() - -In 2004 netif_rx_ni() gained a preempt_disable() section around -netif_rx() and its do_softirq() + testing for it. The do_softirq() part -is required because netif_rx() raises the softirq but does not invoke -it. The preempt_disable() is required to remain on the same CPU which added the -skb to the per-CPU list. -All this can be avoided be putting this into a local_bh_disable()ed -section. The local_bh_enable() part will invoke do_softirq() if -required. 
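
Reconstructed from the hunk below (the surrounding context lines are assumed, so treat this as a sketch), netif_rx_ni() after this change reads roughly:

    int netif_rx_ni(struct sk_buff *skb)
    {
            int err;

            trace_netif_rx_ni_entry(skb);

            local_bh_disable();
            err = netif_rx_internal(skb);
            local_bh_enable();      /* runs any softirq netif_rx_internal() raised */

            return err;
    }
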
- -Signed-off-by: Sebastian Andrzej Siewior ---- - net/core/dev.c | 6 ++---- - 1 file changed, 2 insertions(+), 4 deletions(-) - -diff --git a/net/core/dev.c b/net/core/dev.c -index 45ff62d35a1f..b754adb14205 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -4529,11 +4529,9 @@ int netif_rx_ni(struct sk_buff *skb) - - trace_netif_rx_ni_entry(skb); - -- preempt_disable(); -+ local_bh_disable(); - err = netif_rx_internal(skb); -- if (local_softirq_pending()) -- do_softirq(); -- preempt_enable(); -+ local_bh_enable(); - - return err; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch b/kernel/patches-4.19.x-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch deleted file mode 100644 index 8f3fd1aaf..000000000 --- a/kernel/patches-4.19.x-rt/0124-genirq-Allow-disabling-of-softirq-processing-in-irq-.patch +++ /dev/null @@ -1,160 +0,0 @@ -From a6531b867486fabcd2c6edc5ee1148497582e2b1 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 31 Jan 2012 13:01:27 +0100 -Subject: [PATCH 124/328] genirq: Allow disabling of softirq processing in irq - thread context - -The processing of softirqs in irq thread context is a performance gain -for the non-rt workloads of a system, but it's counterproductive for -interrupts which are explicitely related to the realtime -workload. Allow such interrupts to prevent softirq processing in their -thread context. - -Signed-off-by: Thomas Gleixner ---- - include/linux/interrupt.h | 2 ++ - include/linux/irq.h | 4 +++- - kernel/irq/manage.c | 13 ++++++++++++- - kernel/irq/settings.h | 12 ++++++++++++ - kernel/softirq.c | 9 +++++++++ - 5 files changed, 38 insertions(+), 2 deletions(-) - -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index 6c25b962ba89..99f8b7ace7c9 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -62,6 +62,7 @@ - * interrupt handler after suspending interrupts. For system - * wakeup devices users need to implement wakeup detection in - * their interrupt handlers. -+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) - */ - #define IRQF_SHARED 0x00000080 - #define IRQF_PROBE_SHARED 0x00000100 -@@ -75,6 +76,7 @@ - #define IRQF_NO_THREAD 0x00010000 - #define IRQF_EARLY_RESUME 0x00020000 - #define IRQF_COND_SUSPEND 0x00040000 -+#define IRQF_NO_SOFTIRQ_CALL 0x00080000 - - #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) - -diff --git a/include/linux/irq.h b/include/linux/irq.h -index 6ecaf056ab63..d2487df7ae89 100644 ---- a/include/linux/irq.h -+++ b/include/linux/irq.h -@@ -69,6 +69,7 @@ enum irqchip_irq_state; - * IRQ_IS_POLLED - Always polled by another interrupt. Exclude - * it from the spurious interrupt detection - * mechanism and from core side polling. 
-+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) - * IRQ_DISABLE_UNLAZY - Disable lazy irq disable - */ - enum { -@@ -96,13 +97,14 @@ enum { - IRQ_PER_CPU_DEVID = (1 << 17), - IRQ_IS_POLLED = (1 << 18), - IRQ_DISABLE_UNLAZY = (1 << 19), -+ IRQ_NO_SOFTIRQ_CALL = (1 << 20), - }; - - #define IRQF_MODIFY_MASK \ - (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ - IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ -- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) -+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL) - - #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) - -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index eadcbfbd434a..3858ac895777 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -1004,7 +1004,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) - atomic_inc(&desc->threads_handled); - - irq_finalize_oneshot(desc, action); -- local_bh_enable(); -+ /* -+ * Interrupts which have real time requirements can be set up -+ * to avoid softirq processing in the thread handler. This is -+ * safe as these interrupts do not raise soft interrupts. -+ */ -+ if (irq_settings_no_softirq_call(desc)) -+ _local_bh_enable(); -+ else -+ local_bh_enable(); - return ret; - } - -@@ -1514,6 +1522,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) - irqd_set(&desc->irq_data, IRQD_NO_BALANCING); - } - -+ if (new->flags & IRQF_NO_SOFTIRQ_CALL) -+ irq_settings_set_no_softirq_call(desc); -+ - if (irq_settings_can_autoenable(desc)) { - irq_startup(desc, IRQ_RESEND, IRQ_START_COND); - } else { -diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h -index e43795cd2ccf..47e2f9e23586 100644 ---- a/kernel/irq/settings.h -+++ b/kernel/irq/settings.h -@@ -17,6 +17,7 @@ enum { - _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, - _IRQ_IS_POLLED = IRQ_IS_POLLED, - _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, -+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, - _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, - }; - -@@ -31,6 +32,7 @@ enum { - #define IRQ_PER_CPU_DEVID GOT_YOU_MORON - #define IRQ_IS_POLLED GOT_YOU_MORON - #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON -+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON - #undef IRQF_MODIFY_MASK - #define IRQF_MODIFY_MASK GOT_YOU_MORON - -@@ -41,6 +43,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) - desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); - } - -+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) -+{ -+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; -+} -+ -+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) -+{ -+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; -+} -+ - static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) - { - return desc->status_use_accessors & _IRQ_PER_CPU; -diff --git a/kernel/softirq.c b/kernel/softirq.c -index fd89f8ab85ac..3e9333d148ad 100644 ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -598,6 +598,15 @@ void __local_bh_enable(void) - } - EXPORT_SYMBOL(__local_bh_enable); - -+void _local_bh_enable(void) -+{ -+ if (WARN_ON(current->softirq_nestcnt == 0)) -+ return; -+ if (--current->softirq_nestcnt == 0) -+ migrate_enable(); -+} -+EXPORT_SYMBOL(_local_bh_enable); -+ - int in_serving_softirq(void) - { - return current->flags & PF_IN_SOFTIRQ; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch 
b/kernel/patches-4.19.x-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch deleted file mode 100644 index 4d8232cf5..000000000 --- a/kernel/patches-4.19.x-rt/0125-softirq-split-timer-softirqs-out-of-ksoftirqd.patch +++ /dev/null @@ -1,214 +0,0 @@ -From e106eea6c2a414edbf8c47e0df2db6ea5a462c7f Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 20 Jan 2016 16:34:17 +0100 -Subject: [PATCH 125/328] softirq: split timer softirqs out of ksoftirqd - -The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with -timer wakeup which can not happen in hardirq context. The prio has been -risen from the normal SCHED_OTHER so the timer wakeup does not happen -too late. -With enough networking load it is possible that the system never goes -idle and schedules ksoftirqd and everything else with a higher priority. -One of the tasks left behind is one of RCU's threads and so we see stalls -and eventually run out of memory. -This patch moves the TIMER and HRTIMER softirqs out of the `ksoftirqd` -thread into its own `ktimersoftd`. The former can now run SCHED_OTHER -(same as mainline) and the latter at SCHED_FIFO due to the wakeups. - -From networking point of view: The NAPI callback runs after the network -interrupt thread completes. If its run time takes too long the NAPI code -itself schedules the `ksoftirqd`. Here in the thread it can run at -SCHED_OTHER priority and it won't defer RCU anymore. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/softirq.c | 85 +++++++++++++++++++++++++++++++++++++++++------- - 1 file changed, 73 insertions(+), 12 deletions(-) - -diff --git a/kernel/softirq.c b/kernel/softirq.c -index 3e9333d148ad..fe4e59c80a08 100644 ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -59,6 +59,10 @@ EXPORT_PER_CPU_SYMBOL(irq_stat); - static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; - - DEFINE_PER_CPU(struct task_struct *, ksoftirqd); -+#ifdef CONFIG_PREEMPT_RT_FULL -+#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ)) -+DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd); -+#endif - - const char * const softirq_to_name[NR_SOFTIRQS] = { - "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL", -@@ -172,6 +176,17 @@ static void wakeup_softirqd(void) - wake_up_process(tsk); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void wakeup_timer_softirqd(void) -+{ -+ /* Interrupts are disabled: no need to stop preemption */ -+ struct task_struct *tsk = __this_cpu_read(ktimer_softirqd); -+ -+ if (tsk && tsk->state != TASK_RUNNING) -+ wake_up_process(tsk); -+} -+#endif -+ - static void handle_softirq(unsigned int vec_nr) - { - struct softirq_action *h = softirq_vec + vec_nr; -@@ -493,7 +508,6 @@ void __raise_softirq_irqoff(unsigned int nr) - static inline void local_bh_disable_nort(void) { local_bh_disable(); } - static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } - static void ksoftirqd_set_sched_params(unsigned int cpu) { } --static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } - - #else /* !PREEMPT_RT_FULL */ - -@@ -640,8 +654,12 @@ void thread_do_softirq(void) - - static void do_raise_softirq_irqoff(unsigned int nr) - { -+ unsigned int mask; -+ -+ mask = 1UL << nr; -+ - trace_softirq_raise(nr); -- or_softirq_pending(1UL << nr); -+ or_softirq_pending(mask); - - /* - * If we are not in a hard interrupt and inside a bh disabled -@@ -650,16 +668,29 @@ static void do_raise_softirq_irqoff(unsigned int nr) - * delegate it to ksoftirqd. 
- */ - if (!in_irq() && current->softirq_nestcnt) -- current->softirqs_raised |= (1U << nr); -- else if (__this_cpu_read(ksoftirqd)) -- __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); -+ current->softirqs_raised |= mask; -+ else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd)) -+ return; -+ -+ if (mask & TIMER_SOFTIRQS) -+ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; -+ else -+ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; -+} -+ -+static void wakeup_proper_softirq(unsigned int nr) -+{ -+ if ((1UL << nr) & TIMER_SOFTIRQS) -+ wakeup_timer_softirqd(); -+ else -+ wakeup_softirqd(); - } - - void __raise_softirq_irqoff(unsigned int nr) - { - do_raise_softirq_irqoff(nr); - if (!in_irq() && !current->softirq_nestcnt) -- wakeup_softirqd(); -+ wakeup_proper_softirq(nr); - } - - /* -@@ -685,7 +716,7 @@ void raise_softirq_irqoff(unsigned int nr) - * raise a WARN() if the condition is met. - */ - if (!current->softirq_nestcnt) -- wakeup_softirqd(); -+ wakeup_proper_softirq(nr); - } - - static inline int ksoftirqd_softirq_pending(void) -@@ -697,23 +728,38 @@ static inline void local_bh_disable_nort(void) { } - static inline void _local_bh_enable_nort(void) { } - - static inline void ksoftirqd_set_sched_params(unsigned int cpu) -+{ -+ /* Take over all but timer pending softirqs when starting */ -+ local_irq_disable(); -+ current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS; -+ local_irq_enable(); -+} -+ -+static inline void ktimer_softirqd_set_sched_params(unsigned int cpu) - { - struct sched_param param = { .sched_priority = 1 }; - - sched_setscheduler(current, SCHED_FIFO, ¶m); -- /* Take over all pending softirqs when starting */ -+ -+ /* Take over timer pending softirqs when starting */ - local_irq_disable(); -- current->softirqs_raised = local_softirq_pending(); -+ current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS; - local_irq_enable(); - } - --static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) -+static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu, -+ bool online) - { - struct sched_param param = { .sched_priority = 0 }; - - sched_setscheduler(current, SCHED_NORMAL, ¶m); - } - -+static int ktimer_softirqd_should_run(unsigned int cpu) -+{ -+ return current->softirqs_raised; -+} -+ - #endif /* PREEMPT_RT_FULL */ - /* - * Enter an interrupt context. 
-@@ -766,6 +812,9 @@ static inline void invoke_softirq(void) - if (__this_cpu_read(ksoftirqd) && - __this_cpu_read(ksoftirqd)->softirqs_raised) - wakeup_softirqd(); -+ if (__this_cpu_read(ktimer_softirqd) && -+ __this_cpu_read(ktimer_softirqd)->softirqs_raised) -+ wakeup_timer_softirqd(); - local_irq_restore(flags); - #endif - } -@@ -1153,18 +1202,30 @@ static int takeover_tasklets(unsigned int cpu) - static struct smp_hotplug_thread softirq_threads = { - .store = &ksoftirqd, - .setup = ksoftirqd_set_sched_params, -- .cleanup = ksoftirqd_clr_sched_params, - .thread_should_run = ksoftirqd_should_run, - .thread_fn = run_ksoftirqd, - .thread_comm = "ksoftirqd/%u", - }; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static struct smp_hotplug_thread softirq_timer_threads = { -+ .store = &ktimer_softirqd, -+ .setup = ktimer_softirqd_set_sched_params, -+ .cleanup = ktimer_softirqd_clr_sched_params, -+ .thread_should_run = ktimer_softirqd_should_run, -+ .thread_fn = run_ksoftirqd, -+ .thread_comm = "ktimersoftd/%u", -+}; -+#endif -+ - static __init int spawn_ksoftirqd(void) - { - cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL, - takeover_tasklets); - BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); -- -+#ifdef CONFIG_PREEMPT_RT_FULL -+ BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads)); -+#endif - return 0; - } - early_initcall(spawn_ksoftirqd); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch b/kernel/patches-4.19.x-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch deleted file mode 100644 index 609632ec2..000000000 --- a/kernel/patches-4.19.x-rt/0126-softirq-Avoid-local_softirq_pending-messages-if-ksof.patch +++ /dev/null @@ -1,111 +0,0 @@ -From aa3841d4115d3530ea77a590bbd13940881f5d37 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 18 Feb 2019 13:19:59 +0100 -Subject: [PATCH 126/328] softirq: Avoid "local_softirq_pending" messages if - ksoftirqd is blocked - -If the ksoftirqd thread has a softirq pending and is blocked on the -`local_softirq_locks' lock then softirq_check_pending_idle() won't -complain because the "lock owner" will mask away this softirq from the -mask of pending softirqs. -If ksoftirqd has an additional softirq pending then it won't be masked -out because we never look at ksoftirqd's mask. - -If there are still pending softirqs while going to idle check -ksoftirqd's and ktimersfotd's mask before complaining about unhandled -softirqs. - -Cc: stable-rt@vger.kernel.org -Tested-by: Juri Lelli -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/softirq.c | 57 ++++++++++++++++++++++++++++++++++-------------- - 1 file changed, 41 insertions(+), 16 deletions(-) - -diff --git a/kernel/softirq.c b/kernel/softirq.c -index fe4e59c80a08..1920985eeb09 100644 ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -92,6 +92,31 @@ static inline void softirq_clr_runner(unsigned int sirq) - sr->runner[sirq] = NULL; - } - -+static bool softirq_check_runner_tsk(struct task_struct *tsk, -+ unsigned int *pending) -+{ -+ bool ret = false; -+ -+ if (!tsk) -+ return ret; -+ -+ /* -+ * The wakeup code in rtmutex.c wakes up the task -+ * _before_ it sets pi_blocked_on to NULL under -+ * tsk->pi_lock. So we need to check for both: state -+ * and pi_blocked_on. 
-+ */ -+ raw_spin_lock(&tsk->pi_lock); -+ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { -+ /* Clear all bits pending in that task */ -+ *pending &= ~(tsk->softirqs_raised); -+ ret = true; -+ } -+ raw_spin_unlock(&tsk->pi_lock); -+ -+ return ret; -+} -+ - /* - * On preempt-rt a softirq running context might be blocked on a - * lock. There might be no other runnable task on this CPU because the -@@ -104,6 +129,7 @@ static inline void softirq_clr_runner(unsigned int sirq) - */ - void softirq_check_pending_idle(void) - { -+ struct task_struct *tsk; - static int rate_limit; - struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); - u32 warnpending; -@@ -113,24 +139,23 @@ void softirq_check_pending_idle(void) - return; - - warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; -+ if (!warnpending) -+ return; - for (i = 0; i < NR_SOFTIRQS; i++) { -- struct task_struct *tsk = sr->runner[i]; -+ tsk = sr->runner[i]; - -- /* -- * The wakeup code in rtmutex.c wakes up the task -- * _before_ it sets pi_blocked_on to NULL under -- * tsk->pi_lock. So we need to check for both: state -- * and pi_blocked_on. -- */ -- if (tsk) { -- raw_spin_lock(&tsk->pi_lock); -- if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { -- /* Clear all bits pending in that task */ -- warnpending &= ~(tsk->softirqs_raised); -- warnpending &= ~(1 << i); -- } -- raw_spin_unlock(&tsk->pi_lock); -- } -+ if (softirq_check_runner_tsk(tsk, &warnpending)) -+ warnpending &= ~(1 << i); -+ } -+ -+ if (warnpending) { -+ tsk = __this_cpu_read(ksoftirqd); -+ softirq_check_runner_tsk(tsk, &warnpending); -+ } -+ -+ if (warnpending) { -+ tsk = __this_cpu_read(ktimer_softirqd); -+ softirq_check_runner_tsk(tsk, &warnpending); - } - - if (warnpending) { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch b/kernel/patches-4.19.x-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch deleted file mode 100644 index 0abe02ab0..000000000 --- a/kernel/patches-4.19.x-rt/0127-softirq-Avoid-local_softirq_pending-messages-if-task.patch +++ /dev/null @@ -1,40 +0,0 @@ -From cbcc7f169a523a3b55877cb01d10cd2e96654d3a Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 19 Feb 2019 16:49:29 +0100 -Subject: [PATCH 127/328] softirq: Avoid "local_softirq_pending" messages if - task is in cpu_chill() - -If the softirq thread enters cpu_chill() then ->state is UNINTERRUPTIBLE -and has no ->pi_blocked_on set and so its mask is not taken into account. - -->sleeping_lock is increased by cpu_chill() since it is also requried to -avoid a splat by RCU in case cpu_chill() is used while a RCU-read lock -is held. Use the same mechanism for the softirq-pending check. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/softirq.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/kernel/softirq.c b/kernel/softirq.c -index 1920985eeb09..27a4bb2303d0 100644 ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -105,9 +105,12 @@ static bool softirq_check_runner_tsk(struct task_struct *tsk, - * _before_ it sets pi_blocked_on to NULL under - * tsk->pi_lock. So we need to check for both: state - * and pi_blocked_on. -+ * The test against UNINTERRUPTIBLE + ->sleeping_lock is in case the -+ * task does cpu_chill(). 
- */ - raw_spin_lock(&tsk->pi_lock); -- if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { -+ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING || -+ (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) { - /* Clear all bits pending in that task */ - *pending &= ~(tsk->softirqs_raised); - ret = true; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0128-rtmutex-trylock-is-okay-on-RT.patch b/kernel/patches-4.19.x-rt/0128-rtmutex-trylock-is-okay-on-RT.patch deleted file mode 100644 index 535093120..000000000 --- a/kernel/patches-4.19.x-rt/0128-rtmutex-trylock-is-okay-on-RT.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 0666dfeb48e54032c73ac843af0c02be5353c0dc Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 2 Dec 2015 11:34:07 +0100 -Subject: [PATCH 128/328] rtmutex: trylock is okay on -RT - -non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. On --RT we don't run softirqs in IRQ context but in thread context so it is -not a issue here. - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rtmutex.c | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 9562aaa2afdc..72abe7c121fa 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -1583,7 +1583,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) - { - int ret; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (WARN_ON_ONCE(in_irq() || in_nmi())) -+#else - if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) -+#endif - return 0; - - ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/kernel/patches-4.19.x-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch deleted file mode 100644 index 7c2ccde1e..000000000 --- a/kernel/patches-4.19.x-rt/0129-fs-nfs-turn-rmdir_sem-into-a-semaphore.patch +++ /dev/null @@ -1,150 +0,0 @@ -From 8b0d3978951a7ad39608fdcbfd291e6ba717f86a Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 15 Sep 2016 10:51:27 +0200 -Subject: [PATCH 129/328] fs/nfs: turn rmdir_sem into a semaphore - -The RW semaphore had a reader side which used the _non_owner version -because it most likely took the reader lock in one thread and released it -in another which would cause lockdep to complain if the "regular" -version was used. -On -RT we need the owner because the rw lock is turned into a rtmutex. -The semaphores on the hand are "plain simple" and should work as -expected. We can't have multiple readers but on -RT we don't allow -multiple readers anyway so that is not a loss. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/nfs/dir.c | 8 ++++++++ - fs/nfs/inode.c | 4 ++++ - fs/nfs/unlink.c | 31 +++++++++++++++++++++++++++---- - include/linux/nfs_fs.h | 4 ++++ - 4 files changed, 43 insertions(+), 4 deletions(-) - -diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c -index 4ae726e70d87..c60b20884c45 100644 ---- a/fs/nfs/dir.c -+++ b/fs/nfs/dir.c -@@ -1836,7 +1836,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) - - trace_nfs_rmdir_enter(dir, dentry); - if (d_really_is_positive(dentry)) { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ down(&NFS_I(d_inode(dentry))->rmdir_sem); -+#else - down_write(&NFS_I(d_inode(dentry))->rmdir_sem); -+#endif - error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); - /* Ensure the VFS deletes this inode */ - switch (error) { -@@ -1846,7 +1850,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) - case -ENOENT: - nfs_dentry_handle_enoent(dentry); - } -+#ifdef CONFIG_PREEMPT_RT_BASE -+ up(&NFS_I(d_inode(dentry))->rmdir_sem); -+#else - up_write(&NFS_I(d_inode(dentry))->rmdir_sem); -+#endif - } else - error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); - trace_nfs_rmdir_exit(dir, dentry, error); -diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c -index e4cd3a2fe698..6f22c8d65760 100644 ---- a/fs/nfs/inode.c -+++ b/fs/nfs/inode.c -@@ -2104,7 +2104,11 @@ static void init_once(void *foo) - atomic_long_set(&nfsi->nrequests, 0); - atomic_long_set(&nfsi->commit_info.ncommit, 0); - atomic_set(&nfsi->commit_info.rpcs_out, 0); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ sema_init(&nfsi->rmdir_sem, 1); -+#else - init_rwsem(&nfsi->rmdir_sem); -+#endif - mutex_init(&nfsi->commit_mutex); - nfs4_init_once(nfsi); - } -diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c -index fd61bf0fce63..ce9100b5604d 100644 ---- a/fs/nfs/unlink.c -+++ b/fs/nfs/unlink.c -@@ -52,6 +52,29 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata) - rpc_restart_call_prepare(task); - } - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static void nfs_down_anon(struct semaphore *sema) -+{ -+ down(sema); -+} -+ -+static void nfs_up_anon(struct semaphore *sema) -+{ -+ up(sema); -+} -+ -+#else -+static void nfs_down_anon(struct rw_semaphore *rwsem) -+{ -+ down_read_non_owner(rwsem); -+} -+ -+static void nfs_up_anon(struct rw_semaphore *rwsem) -+{ -+ up_read_non_owner(rwsem); -+} -+#endif -+ - /** - * nfs_async_unlink_release - Release the sillydelete data. 
- * @task: rpc_task of the sillydelete -@@ -65,7 +88,7 @@ static void nfs_async_unlink_release(void *calldata) - struct dentry *dentry = data->dentry; - struct super_block *sb = dentry->d_sb; - -- up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); -+ nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); - d_lookup_done(dentry); - nfs_free_unlinkdata(data); - dput(dentry); -@@ -118,10 +141,10 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf - struct inode *dir = d_inode(dentry->d_parent); - struct dentry *alias; - -- down_read_non_owner(&NFS_I(dir)->rmdir_sem); -+ nfs_down_anon(&NFS_I(dir)->rmdir_sem); - alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq); - if (IS_ERR(alias)) { -- up_read_non_owner(&NFS_I(dir)->rmdir_sem); -+ nfs_up_anon(&NFS_I(dir)->rmdir_sem); - return 0; - } - if (!d_in_lookup(alias)) { -@@ -143,7 +166,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct inode *inode, struct nf - ret = 0; - spin_unlock(&alias->d_lock); - dput(alias); -- up_read_non_owner(&NFS_I(dir)->rmdir_sem); -+ nfs_up_anon(&NFS_I(dir)->rmdir_sem); - /* - * If we'd displaced old cached devname, free it. At that - * point dentry is definitely not a root, so we won't need -diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h -index a0831e9d19c9..94b6fefd90b0 100644 ---- a/include/linux/nfs_fs.h -+++ b/include/linux/nfs_fs.h -@@ -163,7 +163,11 @@ struct nfs_inode { - - /* Readers: in-flight sillydelete RPC calls */ - /* Writers: rmdir */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct semaphore rmdir_sem; -+#else - struct rw_semaphore rmdir_sem; -+#endif - struct mutex commit_mutex; - - #if IS_ENABLED(CONFIG_NFS_V4) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch b/kernel/patches-4.19.x-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch deleted file mode 100644 index 9216f5233..000000000 --- a/kernel/patches-4.19.x-rt/0130-rtmutex-Handle-the-various-new-futex-race-conditions.patch +++ /dev/null @@ -1,254 +0,0 @@ -From 1ed0026aa6e48c934e7c081a04d36fe44b9b7fa1 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 10 Jun 2011 11:04:15 +0200 -Subject: [PATCH 130/328] rtmutex: Handle the various new futex race conditions - -RT opens a few new interesting race conditions in the rtmutex/futex -combo due to futex hash bucket lock being a 'sleeping' spinlock and -therefor not disabling preemption. - -Signed-off-by: Thomas Gleixner ---- - kernel/futex.c | 77 ++++++++++++++++++++++++++------- - kernel/locking/rtmutex.c | 36 ++++++++++++--- - kernel/locking/rtmutex_common.h | 2 + - 3 files changed, 94 insertions(+), 21 deletions(-) - -diff --git a/kernel/futex.c b/kernel/futex.c -index 23e1f8a478e8..5ec49f862c53 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -2149,6 +2149,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - requeue_pi_wake_futex(this, &key2, hb2); - drop_count++; - continue; -+ } else if (ret == -EAGAIN) { -+ /* -+ * Waiter was woken by timeout or -+ * signal and has set pi_blocked_on to -+ * PI_WAKEUP_INPROGRESS before we -+ * tried to enqueue it on the rtmutex. 
-+ */ -+ this->pi_state = NULL; -+ put_pi_state(pi_state); -+ continue; - } else if (ret) { - /* - * rt_mutex_start_proxy_lock() detected a -@@ -3227,7 +3237,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - struct hrtimer_sleeper timeout, *to = NULL; - struct futex_pi_state *pi_state = NULL; - struct rt_mutex_waiter rt_waiter; -- struct futex_hash_bucket *hb; -+ struct futex_hash_bucket *hb, *hb2; - union futex_key key2 = FUTEX_KEY_INIT; - struct futex_q q = futex_q_init; - int res, ret; -@@ -3285,20 +3295,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - /* Queue the futex_q, drop the hb lock, wait for wakeup. */ - futex_wait_queue_me(hb, &q, to); - -- spin_lock(&hb->lock); -- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); -- spin_unlock(&hb->lock); -- if (ret) -- goto out_put_keys; -+ /* -+ * On RT we must avoid races with requeue and trying to block -+ * on two mutexes (hb->lock and uaddr2's rtmutex) by -+ * serializing access to pi_blocked_on with pi_lock. -+ */ -+ raw_spin_lock_irq(¤t->pi_lock); -+ if (current->pi_blocked_on) { -+ /* -+ * We have been requeued or are in the process of -+ * being requeued. -+ */ -+ raw_spin_unlock_irq(¤t->pi_lock); -+ } else { -+ /* -+ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS -+ * prevents a concurrent requeue from moving us to the -+ * uaddr2 rtmutex. After that we can safely acquire -+ * (and possibly block on) hb->lock. -+ */ -+ current->pi_blocked_on = PI_WAKEUP_INPROGRESS; -+ raw_spin_unlock_irq(¤t->pi_lock); -+ -+ spin_lock(&hb->lock); -+ -+ /* -+ * Clean up pi_blocked_on. We might leak it otherwise -+ * when we succeeded with the hb->lock in the fast -+ * path. -+ */ -+ raw_spin_lock_irq(¤t->pi_lock); -+ current->pi_blocked_on = NULL; -+ raw_spin_unlock_irq(¤t->pi_lock); -+ -+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); -+ spin_unlock(&hb->lock); -+ if (ret) -+ goto out_put_keys; -+ } - - /* -- * In order for us to be here, we know our q.key == key2, and since -- * we took the hb->lock above, we also know that futex_requeue() has -- * completed and we no longer have to concern ourselves with a wakeup -- * race with the atomic proxy lock acquisition by the requeue code. The -- * futex_requeue dropped our key1 reference and incremented our key2 -- * reference count. -+ * In order to be here, we have either been requeued, are in -+ * the process of being requeued, or requeue successfully -+ * acquired uaddr2 on our behalf. If pi_blocked_on was -+ * non-null above, we may be racing with a requeue. Do not -+ * rely on q->lock_ptr to be hb2->lock until after blocking on -+ * hb->lock or hb2->lock. The futex_requeue dropped our key1 -+ * reference and incremented our key2 reference count. - */ -+ hb2 = hash_futex(&key2); - - /* Check if the requeue code acquired the second futex for us. */ - if (!q.rt_waiter) { -@@ -3307,7 +3352,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * did a lock-steal - fix up the PI-state in that case. - */ - if (q.pi_state && (q.pi_state->owner != current)) { -- spin_lock(q.lock_ptr); -+ spin_lock(&hb2->lock); -+ BUG_ON(&hb2->lock != q.lock_ptr); - ret = fixup_pi_state_owner(uaddr2, &q, current); - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { - pi_state = q.pi_state; -@@ -3318,7 +3364,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * the requeue_pi() code acquired for us. 
- */ - put_pi_state(q.pi_state); -- spin_unlock(q.lock_ptr); -+ spin_unlock(&hb2->lock); - } - } else { - struct rt_mutex *pi_mutex; -@@ -3332,7 +3378,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - pi_mutex = &q.pi_state->pi_mutex; - ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); - -- spin_lock(q.lock_ptr); -+ spin_lock(&hb2->lock); -+ BUG_ON(&hb2->lock != q.lock_ptr); - if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) - ret = 0; - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 72abe7c121fa..71d161c93b98 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -135,6 +135,11 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) - WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); - } - -+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) -+{ -+ return waiter && waiter != PI_WAKEUP_INPROGRESS; -+} -+ - /* - * We can speed up the acquire/release, if there's no debugging state to be - * set up. -@@ -379,7 +384,8 @@ int max_lock_depth = 1024; - - static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) - { -- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; -+ return rt_mutex_real_waiter(p->pi_blocked_on) ? -+ p->pi_blocked_on->lock : NULL; - } - - /* -@@ -515,7 +521,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, - * reached or the state of the chain has changed while we - * dropped the locks. - */ -- if (!waiter) -+ if (!rt_mutex_real_waiter(waiter)) - goto out_unlock_pi; - - /* -@@ -951,6 +957,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - return -EDEADLK; - - raw_spin_lock(&task->pi_lock); -+ /* -+ * In the case of futex requeue PI, this will be a proxy -+ * lock. The task will wake unaware that it is enqueueed on -+ * this lock. Avoid blocking on two locks and corrupting -+ * pi_blocked_on via the PI_WAKEUP_INPROGRESS -+ * flag. futex_wait_requeue_pi() sets this when it wakes up -+ * before requeue (due to a signal or timeout). Do not enqueue -+ * the task if PI_WAKEUP_INPROGRESS is set. 
-+ */ -+ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { -+ raw_spin_unlock(&task->pi_lock); -+ return -EAGAIN; -+ } -+ -+ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); -+ - waiter->task = task; - waiter->lock = lock; - waiter->prio = task->prio; -@@ -974,7 +996,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - rt_mutex_enqueue_pi(owner, waiter); - - rt_mutex_adjust_prio(owner); -- if (owner->pi_blocked_on) -+ if (rt_mutex_real_waiter(owner->pi_blocked_on)) - chain_walk = 1; - } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { - chain_walk = 1; -@@ -1070,7 +1092,7 @@ static void remove_waiter(struct rt_mutex *lock, - { - bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); - struct task_struct *owner = rt_mutex_owner(lock); -- struct rt_mutex *next_lock; -+ struct rt_mutex *next_lock = NULL; - - lockdep_assert_held(&lock->wait_lock); - -@@ -1096,7 +1118,8 @@ static void remove_waiter(struct rt_mutex *lock, - rt_mutex_adjust_prio(owner); - - /* Store the lock on which owner is blocked or NULL */ -- next_lock = task_blocked_on_lock(owner); -+ if (rt_mutex_real_waiter(owner->pi_blocked_on)) -+ next_lock = task_blocked_on_lock(owner); - - raw_spin_unlock(&owner->pi_lock); - -@@ -1132,7 +1155,8 @@ void rt_mutex_adjust_pi(struct task_struct *task) - raw_spin_lock_irqsave(&task->pi_lock, flags); - - waiter = task->pi_blocked_on; -- if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { -+ if (!rt_mutex_real_waiter(waiter) || -+ rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { - raw_spin_unlock_irqrestore(&task->pi_lock, flags); - return; - } -diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h -index d1d62f942be2..f4b6596d224a 100644 ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -130,6 +130,8 @@ enum rtmutex_chainwalk { - /* - * PI-futex support (proxy locking functions, etc.): - */ -+#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) -+ - extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); - extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch b/kernel/patches-4.19.x-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch deleted file mode 100644 index e05c4a63a..000000000 --- a/kernel/patches-4.19.x-rt/0131-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch +++ /dev/null @@ -1,120 +0,0 @@ -From 73f0f3253d80c3fc291c3198becd7054215494e3 Mon Sep 17 00:00:00 2001 -From: Steven Rostedt -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 131/328] futex: Fix bug on when a requeued RT task times out - -Requeue with timeout causes a bug with PREEMPT_RT_FULL. - -The bug comes from a timed out condition. - - - TASK 1 TASK 2 - ------ ------ - futex_wait_requeue_pi() - futex_wait_queue_me() - - - double_lock_hb(); - - raw_spin_lock(pi_lock); - if (current->pi_blocked_on) { - } else { - current->pi_blocked_on = PI_WAKE_INPROGRESS; - run_spin_unlock(pi_lock); - spin_lock(hb->lock); <-- blocked! - - - plist_for_each_entry_safe(this) { - rt_mutex_start_proxy_lock(); - task_blocks_on_rt_mutex(); - BUG_ON(task->pi_blocked_on)!!!! - -The BUG_ON() actually has a check for PI_WAKE_INPROGRESS, but the -problem is that, after TASK 1 sets PI_WAKE_INPROGRESS, it then tries to -grab the hb->lock, which it fails to do so. 
As the hb->lock is a mutex, -it will block and set the "pi_blocked_on" to the hb->lock. - -When TASK 2 goes to requeue it, the check for PI_WAKE_INPROGESS fails -because the task1's pi_blocked_on is no longer set to that, but instead, -set to the hb->lock. - -The fix: - -When calling rt_mutex_start_proxy_lock() a check is made to see -if the proxy tasks pi_blocked_on is set. If so, exit out early. -Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies -the proxy task that it is being requeued, and will handle things -appropriately. - - -Signed-off-by: Steven Rostedt -Signed-off-by: Thomas Gleixner ---- - kernel/locking/rtmutex.c | 31 ++++++++++++++++++++++++++++++- - kernel/locking/rtmutex_common.h | 1 + - 2 files changed, 31 insertions(+), 1 deletion(-) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 71d161c93b98..1c3f56d3d9b6 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -137,7 +137,8 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) - - static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) - { -- return waiter && waiter != PI_WAKEUP_INPROGRESS; -+ return waiter && waiter != PI_WAKEUP_INPROGRESS && -+ waiter != PI_REQUEUE_INPROGRESS; - } - - /* -@@ -1784,6 +1785,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, - if (try_to_take_rt_mutex(lock, task, NULL)) - return 1; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * In PREEMPT_RT there's an added race. -+ * If the task, that we are about to requeue, times out, -+ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue -+ * to skip this task. But right after the task sets -+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then -+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex. -+ * This will replace the PI_WAKEUP_INPROGRESS with the actual -+ * lock that it blocks on. We *must not* place this task -+ * on this proxy lock in that case. -+ * -+ * To prevent this race, we first take the task's pi_lock -+ * and check if it has updated its pi_blocked_on. If it has, -+ * we assume that it woke up and we return -EAGAIN. -+ * Otherwise, we set the task's pi_blocked_on to -+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up -+ * it will know that we are in the process of requeuing it. 
-+ */ -+ raw_spin_lock(&task->pi_lock); -+ if (task->pi_blocked_on) { -+ raw_spin_unlock(&task->pi_lock); -+ return -EAGAIN; -+ } -+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS; -+ raw_spin_unlock(&task->pi_lock); -+#endif -+ - /* We enforce deadlock detection for futexes */ - ret = task_blocks_on_rt_mutex(lock, waiter, task, - RT_MUTEX_FULL_CHAINWALK); -diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h -index f4b6596d224a..461527f3f7af 100644 ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -131,6 +131,7 @@ enum rtmutex_chainwalk { - * PI-futex support (proxy locking functions, etc.): - */ - #define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) -+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) - - extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); - extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/kernel/patches-4.19.x-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch deleted file mode 100644 index 68d5f3634..000000000 --- a/kernel/patches-4.19.x-rt/0132-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch +++ /dev/null @@ -1,49 +0,0 @@ -From d85ad9d88f2afd388a77f8652da86554c45e9bc2 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 1 Mar 2013 11:17:42 +0100 -Subject: [PATCH 132/328] futex: Ensure lock/unlock symetry versus pi_lock and - hash bucket lock - -In exit_pi_state_list() we have the following locking construct: - - spin_lock(&hb->lock); - raw_spin_lock_irq(&curr->pi_lock); - - ... - spin_unlock(&hb->lock); - -In !RT this works, but on RT the migrate_enable() function which is -called from spin_unlock() sees atomic context due to the held pi_lock -and just decrements the migrate_disable_atomic counter of the -task. Now the next call to migrate_disable() sees the counter being -negative and issues a warning. That check should be in -migrate_enable() already. - -Fix this by dropping pi_lock before unlocking hb->lock and reaquire -pi_lock after that again. This is safe as the loop code reevaluates -head again under the pi_lock. 
- -Reported-by: Yong Zhang -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/futex.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/kernel/futex.c b/kernel/futex.c -index 5ec49f862c53..60be4530c767 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -921,7 +921,9 @@ void exit_pi_state_list(struct task_struct *curr) - if (head->next != next) { - /* retain curr->pi_lock for the loop invariant */ - raw_spin_unlock(&pi_state->pi_mutex.wait_lock); -+ raw_spin_unlock_irq(&curr->pi_lock); - spin_unlock(&hb->lock); -+ raw_spin_lock_irq(&curr->pi_lock); - put_pi_state(pi_state); - continue; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0133-pid.h-include-atomic.h.patch b/kernel/patches-4.19.x-rt/0133-pid.h-include-atomic.h.patch deleted file mode 100644 index 95f4ff63f..000000000 --- a/kernel/patches-4.19.x-rt/0133-pid.h-include-atomic.h.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 7706f24354f883b341ed0ed1535167264b7e2315 Mon Sep 17 00:00:00 2001 -From: Grygorii Strashko -Date: Tue, 21 Jul 2015 19:43:56 +0300 -Subject: [PATCH 133/328] pid.h: include atomic.h - -This patch fixes build error: - CC kernel/pid_namespace.o -In file included from kernel/pid_namespace.c:11:0: -include/linux/pid.h: In function 'get_pid': -include/linux/pid.h:78:3: error: implicit declaration of function 'atomic_inc' [-Werror=implicit-function-declaration] - atomic_inc(&pid->count); - ^ -which happens when - CONFIG_PROVE_LOCKING=n - CONFIG_DEBUG_SPINLOCK=n - CONFIG_DEBUG_MUTEXES=n - CONFIG_DEBUG_LOCK_ALLOC=n - CONFIG_PID_NS=y - -Vanilla gets this via spinlock.h. - -Signed-off-by: Grygorii Strashko -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/pid.h | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/include/linux/pid.h b/include/linux/pid.h -index 14a9a39da9c7..a9026a5da196 100644 ---- a/include/linux/pid.h -+++ b/include/linux/pid.h -@@ -3,6 +3,7 @@ - #define _LINUX_PID_H - - #include -+#include - - enum pid_type - { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0134-arm-include-definition-for-cpumask_t.patch b/kernel/patches-4.19.x-rt/0134-arm-include-definition-for-cpumask_t.patch deleted file mode 100644 index a873b5904..000000000 --- a/kernel/patches-4.19.x-rt/0134-arm-include-definition-for-cpumask_t.patch +++ /dev/null @@ -1,30 +0,0 @@ -From b7f97d2d6bca4bbc87c6aaaac3847a6bb96d8221 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 22 Dec 2016 17:28:33 +0100 -Subject: [PATCH 134/328] arm: include definition for cpumask_t - -This definition gets pulled in by other files. With the (later) split of -RCU and spinlock.h it won't compile anymore. -The split is done in ("rbtree: don't include the rcu header"). 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/include/asm/irq.h | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h -index 46d41140df27..c421b5b81946 100644 ---- a/arch/arm/include/asm/irq.h -+++ b/arch/arm/include/asm/irq.h -@@ -23,6 +23,8 @@ - #endif - - #ifndef __ASSEMBLY__ -+#include -+ - struct irqaction; - struct pt_regs; - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/kernel/patches-4.19.x-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch deleted file mode 100644 index 35fcb47c7..000000000 --- a/kernel/patches-4.19.x-rt/0135-locking-locktorture-Do-NOT-include-rwlock.h-directly.patch +++ /dev/null @@ -1,33 +0,0 @@ -From df43e09336267ae6fd3e9e0f754a1c13bc90dabc Mon Sep 17 00:00:00 2001 -From: "Wolfgang M. Reimer" -Date: Tue, 21 Jul 2015 16:20:07 +0200 -Subject: [PATCH 135/328] locking: locktorture: Do NOT include rwlock.h - directly - -Including rwlock.h directly will cause kernel builds to fail -if CONFIG_PREEMPT_RT_FULL is defined. The correct header file -(rwlock_rt.h OR rwlock.h) will be included by spinlock.h which -is included by locktorture.c anyway. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Wolfgang M. Reimer -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/locktorture.c | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c -index 7d0b0ed74404..a81e6ef33a04 100644 ---- a/kernel/locking/locktorture.c -+++ b/kernel/locking/locktorture.c -@@ -29,7 +29,6 @@ - #include - #include - #include --#include - #include - #include - #include --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch b/kernel/patches-4.19.x-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch deleted file mode 100644 index e20855bbf..000000000 --- a/kernel/patches-4.19.x-rt/0136-rtmutex-Add-rtmutex_lock_killable.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 8d29fec3b1d6060351dadfed4d8e4555ee42ada2 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 9 Jun 2011 11:43:52 +0200 -Subject: [PATCH 136/328] rtmutex: Add rtmutex_lock_killable() - -Add "killable" type to rtmutex. We need this since rtmutex are used as -"normal" mutexes which do use this type. 
- -Signed-off-by: Thomas Gleixner ---- - include/linux/rtmutex.h | 1 + - kernel/locking/rtmutex.c | 19 +++++++++++++++++++ - 2 files changed, 20 insertions(+) - -diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h -index 6fd615a0eea9..81ece6a8291a 100644 ---- a/include/linux/rtmutex.h -+++ b/include/linux/rtmutex.h -@@ -115,6 +115,7 @@ extern void rt_mutex_lock(struct rt_mutex *lock); - #endif - - extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); -+extern int rt_mutex_lock_killable(struct rt_mutex *lock); - extern int rt_mutex_timed_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *timeout); - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 1c3f56d3d9b6..a4b2af7718f8 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -1562,6 +1562,25 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) - return __rt_mutex_slowtrylock(lock); - } - -+/** -+ * rt_mutex_lock_killable - lock a rt_mutex killable -+ * -+ * @lock: the rt_mutex to be locked -+ * @detect_deadlock: deadlock detection on/off -+ * -+ * Returns: -+ * 0 on success -+ * -EINTR when interrupted by a signal -+ * -EDEADLK when the lock would deadlock (when deadlock detection is on) -+ */ -+int __sched rt_mutex_lock_killable(struct rt_mutex *lock) -+{ -+ might_sleep(); -+ -+ return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock); -+} -+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); -+ - /** - * rt_mutex_timed_lock - lock a rt_mutex interruptible - * the timeout structure is provided --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0137-rtmutex-Make-lock_killable-work.patch b/kernel/patches-4.19.x-rt/0137-rtmutex-Make-lock_killable-work.patch deleted file mode 100644 index 05b43f8e6..000000000 --- a/kernel/patches-4.19.x-rt/0137-rtmutex-Make-lock_killable-work.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 3543c69b949257b8409b0ebb45b2e369afcbeb9b Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sat, 1 Apr 2017 12:50:59 +0200 -Subject: [PATCH 137/328] rtmutex: Make lock_killable work - -Locking an rt mutex killable does not work because signal handling is -restricted to TASK_INTERRUPTIBLE. - -Use signal_pending_state() unconditionaly. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rtmutex.c | 19 +++++++------------ - 1 file changed, 7 insertions(+), 12 deletions(-) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index a4b2af7718f8..f058bb976212 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -1201,18 +1201,13 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, - if (try_to_take_rt_mutex(lock, current, waiter)) - break; - -- /* -- * TASK_INTERRUPTIBLE checks for signals and -- * timeout. Ignored otherwise. -- */ -- if (likely(state == TASK_INTERRUPTIBLE)) { -- /* Signal pending? 
*/ -- if (signal_pending(current)) -- ret = -EINTR; -- if (timeout && !timeout->task) -- ret = -ETIMEDOUT; -- if (ret) -- break; -+ if (timeout && !timeout->task) { -+ ret = -ETIMEDOUT; -+ break; -+ } -+ if (signal_pending_state(state, current)) { -+ ret = -EINTR; -+ break; - } - - raw_spin_unlock_irq(&lock->wait_lock); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0138-spinlock-Split-the-lock-types-header.patch b/kernel/patches-4.19.x-rt/0138-spinlock-Split-the-lock-types-header.patch deleted file mode 100644 index faf1e5bb7..000000000 --- a/kernel/patches-4.19.x-rt/0138-spinlock-Split-the-lock-types-header.patch +++ /dev/null @@ -1,220 +0,0 @@ -From 144cea745b056040b555dc8819e31a8ae76714c2 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 29 Jun 2011 19:34:01 +0200 -Subject: [PATCH 138/328] spinlock: Split the lock types header - -Split raw_spinlock into its own file and the remaining spinlock_t into -its own non-RT header. The non-RT header will be replaced later by sleeping -spinlocks. - -Signed-off-by: Thomas Gleixner ---- - include/linux/rwlock_types.h | 4 ++ - include/linux/spinlock_types.h | 71 +---------------------------- - include/linux/spinlock_types_nort.h | 33 ++++++++++++++ - include/linux/spinlock_types_raw.h | 55 ++++++++++++++++++++++ - 4 files changed, 94 insertions(+), 69 deletions(-) - create mode 100644 include/linux/spinlock_types_nort.h - create mode 100644 include/linux/spinlock_types_raw.h - -diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h -index 857a72ceb794..c21683f3e14a 100644 ---- a/include/linux/rwlock_types.h -+++ b/include/linux/rwlock_types.h -@@ -1,6 +1,10 @@ - #ifndef __LINUX_RWLOCK_TYPES_H - #define __LINUX_RWLOCK_TYPES_H - -+#if !defined(__LINUX_SPINLOCK_TYPES_H) -+# error "Do not include directly, include spinlock_types.h" -+#endif -+ - /* - * include/linux/rwlock_types.h - generic rwlock type definitions - * and initializers -diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h -index 24b4e6f2c1a2..5c8664d57fb8 100644 ---- a/include/linux/spinlock_types.h -+++ b/include/linux/spinlock_types.h -@@ -9,76 +9,9 @@ - * Released under the General Public License (GPL). 
- */ - --#if defined(CONFIG_SMP) --# include --#else --# include --#endif -+#include - --#include -- --typedef struct raw_spinlock { -- arch_spinlock_t raw_lock; --#ifdef CONFIG_DEBUG_SPINLOCK -- unsigned int magic, owner_cpu; -- void *owner; --#endif --#ifdef CONFIG_DEBUG_LOCK_ALLOC -- struct lockdep_map dep_map; --#endif --} raw_spinlock_t; -- --#define SPINLOCK_MAGIC 0xdead4ead -- --#define SPINLOCK_OWNER_INIT ((void *)-1L) -- --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } --#else --# define SPIN_DEP_MAP_INIT(lockname) --#endif -- --#ifdef CONFIG_DEBUG_SPINLOCK --# define SPIN_DEBUG_INIT(lockname) \ -- .magic = SPINLOCK_MAGIC, \ -- .owner_cpu = -1, \ -- .owner = SPINLOCK_OWNER_INIT, --#else --# define SPIN_DEBUG_INIT(lockname) --#endif -- --#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ -- { \ -- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ -- SPIN_DEBUG_INIT(lockname) \ -- SPIN_DEP_MAP_INIT(lockname) } -- --#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ -- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) -- --#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) -- --typedef struct spinlock { -- union { -- struct raw_spinlock rlock; -- --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) -- struct { -- u8 __padding[LOCK_PADSIZE]; -- struct lockdep_map dep_map; -- }; --#endif -- }; --} spinlock_t; -- --#define __SPIN_LOCK_INITIALIZER(lockname) \ -- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } -- --#define __SPIN_LOCK_UNLOCKED(lockname) \ -- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) -- --#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -+#include - - #include - -diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h -new file mode 100644 -index 000000000000..f1dac1fb1d6a ---- /dev/null -+++ b/include/linux/spinlock_types_nort.h -@@ -0,0 +1,33 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H -+#define __LINUX_SPINLOCK_TYPES_NORT_H -+ -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. 
Include spinlock_types.h instead" -+#endif -+ -+/* -+ * The non RT version maps spinlocks to raw_spinlocks -+ */ -+typedef struct spinlock { -+ union { -+ struct raw_spinlock rlock; -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) -+ struct { -+ u8 __padding[LOCK_PADSIZE]; -+ struct lockdep_map dep_map; -+ }; -+#endif -+ }; -+} spinlock_t; -+ -+#define __SPIN_LOCK_INITIALIZER(lockname) \ -+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } -+ -+#define __SPIN_LOCK_UNLOCKED(lockname) \ -+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) -+ -+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -+ -+#endif -diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h -new file mode 100644 -index 000000000000..822bf64a61d3 ---- /dev/null -+++ b/include/linux/spinlock_types_raw.h -@@ -0,0 +1,55 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H -+#define __LINUX_SPINLOCK_TYPES_RAW_H -+ -+#include -+ -+#if defined(CONFIG_SMP) -+# include -+#else -+# include -+#endif -+ -+#include -+ -+typedef struct raw_spinlock { -+ arch_spinlock_t raw_lock; -+#ifdef CONFIG_DEBUG_SPINLOCK -+ unsigned int magic, owner_cpu; -+ void *owner; -+#endif -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+} raw_spinlock_t; -+ -+#define SPINLOCK_MAGIC 0xdead4ead -+ -+#define SPINLOCK_OWNER_INIT ((void *)-1L) -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -+#else -+# define SPIN_DEP_MAP_INIT(lockname) -+#endif -+ -+#ifdef CONFIG_DEBUG_SPINLOCK -+# define SPIN_DEBUG_INIT(lockname) \ -+ .magic = SPINLOCK_MAGIC, \ -+ .owner_cpu = -1, \ -+ .owner = SPINLOCK_OWNER_INIT, -+#else -+# define SPIN_DEBUG_INIT(lockname) -+#endif -+ -+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ -+ { \ -+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ -+ SPIN_DEBUG_INIT(lockname) \ -+ SPIN_DEP_MAP_INIT(lockname) } -+ -+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ -+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) -+ -+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) -+ -+#endif --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0139-rtmutex-Avoid-include-hell.patch b/kernel/patches-4.19.x-rt/0139-rtmutex-Avoid-include-hell.patch deleted file mode 100644 index 50f0e3b66..000000000 --- a/kernel/patches-4.19.x-rt/0139-rtmutex-Avoid-include-hell.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 20ec86befc19cae2065e4b8e3dcda396081489eb Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 29 Jun 2011 20:06:39 +0200 -Subject: [PATCH 139/328] rtmutex: Avoid include hell - -Include only the required raw types. This avoids pulling in the -complete spinlock header which in turn requires rtmutex.h at some point. 
- -Signed-off-by: Thomas Gleixner ---- - include/linux/rtmutex.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h -index 81ece6a8291a..a355289b1fa1 100644 ---- a/include/linux/rtmutex.h -+++ b/include/linux/rtmutex.h -@@ -15,7 +15,7 @@ - - #include - #include --#include -+#include - - extern int max_lock_depth; /* for sysctl */ - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0140-rbtree-don-t-include-the-rcu-header.patch b/kernel/patches-4.19.x-rt/0140-rbtree-don-t-include-the-rcu-header.patch deleted file mode 100644 index 70ebaf519..000000000 --- a/kernel/patches-4.19.x-rt/0140-rbtree-don-t-include-the-rcu-header.patch +++ /dev/null @@ -1,174 +0,0 @@ -From 0c9090c07884c6d1e32a8def9f3c7fa5e889f4e3 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 26 Feb 2019 16:56:02 +0100 -Subject: [PATCH 140/328] rbtree: don't include the rcu header -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -The RCU header pulls in spinlock.h and fails due not yet defined types: - -|In file included from include/linux/spinlock.h:275:0, -| from include/linux/rcupdate.h:38, -| from include/linux/rbtree.h:34, -| from include/linux/rtmutex.h:17, -| from include/linux/spinlock_types.h:18, -| from kernel/bounds.c:13: -|include/linux/rwlock_rt.h:16:38: error: unknown type name ‘rwlock_t’ -| extern void __lockfunc rt_write_lock(rwlock_t *rwlock); -| ^ - -This patch moves the required RCU function from the rcupdate.h header file into -a new header file which can be included by both users. - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/rbtree.h | 2 +- - include/linux/rcu_assign_pointer.h | 54 ++++++++++++++++++++++++++++++ - include/linux/rcupdate.h | 49 +-------------------------- - 3 files changed, 56 insertions(+), 49 deletions(-) - create mode 100644 include/linux/rcu_assign_pointer.h - -diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h -index fcbeed4053ef..2aa2aec354c2 100644 ---- a/include/linux/rbtree.h -+++ b/include/linux/rbtree.h -@@ -31,7 +31,7 @@ - - #include - #include --#include -+#include - - struct rb_node { - unsigned long __rb_parent_color; -diff --git a/include/linux/rcu_assign_pointer.h b/include/linux/rcu_assign_pointer.h -new file mode 100644 -index 000000000000..7066962a4379 ---- /dev/null -+++ b/include/linux/rcu_assign_pointer.h -@@ -0,0 +1,54 @@ -+#ifndef __LINUX_RCU_ASSIGN_POINTER_H__ -+#define __LINUX_RCU_ASSIGN_POINTER_H__ -+#include -+#include -+ -+/** -+ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable -+ * @v: The value to statically initialize with. -+ */ -+#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) -+ -+/** -+ * rcu_assign_pointer() - assign to RCU-protected pointer -+ * @p: pointer to assign to -+ * @v: value to assign (publish) -+ * -+ * Assigns the specified value to the specified RCU-protected -+ * pointer, ensuring that any concurrent RCU readers will see -+ * any prior initialization. -+ * -+ * Inserts memory barriers on architectures that require them -+ * (which is most of them), and also prevents the compiler from -+ * reordering the code that initializes the structure after the pointer -+ * assignment. More importantly, this call documents which pointers -+ * will be dereferenced by RCU read-side code. -+ * -+ * In some special cases, you may use RCU_INIT_POINTER() instead -+ * of rcu_assign_pointer(). 
RCU_INIT_POINTER() is a bit faster due -+ * to the fact that it does not constrain either the CPU or the compiler. -+ * That said, using RCU_INIT_POINTER() when you should have used -+ * rcu_assign_pointer() is a very bad thing that results in -+ * impossible-to-diagnose memory corruption. So please be careful. -+ * See the RCU_INIT_POINTER() comment header for details. -+ * -+ * Note that rcu_assign_pointer() evaluates each of its arguments only -+ * once, appearances notwithstanding. One of the "extra" evaluations -+ * is in typeof() and the other visible only to sparse (__CHECKER__), -+ * neither of which actually execute the argument. As with most cpp -+ * macros, this execute-arguments-only-once property is important, so -+ * please be careful when making changes to rcu_assign_pointer() and the -+ * other macros that it invokes. -+ */ -+#define rcu_assign_pointer(p, v) \ -+({ \ -+ uintptr_t _r_a_p__v = (uintptr_t)(v); \ -+ \ -+ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ -+ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ -+ else \ -+ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ -+ _r_a_p__v; \ -+}) -+ -+#endif -diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index 027c58cdbb6e..e6733d7911e9 100644 ---- a/include/linux/rcupdate.h -+++ b/include/linux/rcupdate.h -@@ -42,6 +42,7 @@ - #include - #include - #include -+#include - - #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) - #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) -@@ -369,54 +370,6 @@ static inline void rcu_preempt_sleep_check(void) { } - ((typeof(*p) __force __kernel *)(________p1)); \ - }) - --/** -- * RCU_INITIALIZER() - statically initialize an RCU-protected global variable -- * @v: The value to statically initialize with. -- */ --#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) -- --/** -- * rcu_assign_pointer() - assign to RCU-protected pointer -- * @p: pointer to assign to -- * @v: value to assign (publish) -- * -- * Assigns the specified value to the specified RCU-protected -- * pointer, ensuring that any concurrent RCU readers will see -- * any prior initialization. -- * -- * Inserts memory barriers on architectures that require them -- * (which is most of them), and also prevents the compiler from -- * reordering the code that initializes the structure after the pointer -- * assignment. More importantly, this call documents which pointers -- * will be dereferenced by RCU read-side code. -- * -- * In some special cases, you may use RCU_INIT_POINTER() instead -- * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due -- * to the fact that it does not constrain either the CPU or the compiler. -- * That said, using RCU_INIT_POINTER() when you should have used -- * rcu_assign_pointer() is a very bad thing that results in -- * impossible-to-diagnose memory corruption. So please be careful. -- * See the RCU_INIT_POINTER() comment header for details. -- * -- * Note that rcu_assign_pointer() evaluates each of its arguments only -- * once, appearances notwithstanding. One of the "extra" evaluations -- * is in typeof() and the other visible only to sparse (__CHECKER__), -- * neither of which actually execute the argument. As with most cpp -- * macros, this execute-arguments-only-once property is important, so -- * please be careful when making changes to rcu_assign_pointer() and the -- * other macros that it invokes. 
-- */ --#define rcu_assign_pointer(p, v) \ --({ \ -- uintptr_t _r_a_p__v = (uintptr_t)(v); \ -- \ -- if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ -- WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ -- else \ -- smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ -- _r_a_p__v; \ --}) -- - /** - * rcu_swap_protected() - swap an RCU and a regular pointer - * @rcu_ptr: RCU pointer --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch b/kernel/patches-4.19.x-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch deleted file mode 100644 index 9b3afb728..000000000 --- a/kernel/patches-4.19.x-rt/0141-rtmutex-Provide-rt_mutex_slowlock_locked.patch +++ /dev/null @@ -1,144 +0,0 @@ -From 3340a41255c128e25956abc1370f0e5d1b702dda Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 12 Oct 2017 16:14:22 +0200 -Subject: [PATCH 141/328] rtmutex: Provide rt_mutex_slowlock_locked() - -This is the inner-part of rt_mutex_slowlock(), required for rwsem-rt. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rtmutex.c | 67 +++++++++++++++++++-------------- - kernel/locking/rtmutex_common.h | 7 ++++ - 2 files changed, 45 insertions(+), 29 deletions(-) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index f058bb976212..921345c31161 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -1244,35 +1244,16 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, - } - } - --/* -- * Slow path lock function: -- */ --static int __sched --rt_mutex_slowlock(struct rt_mutex *lock, int state, -- struct hrtimer_sleeper *timeout, -- enum rtmutex_chainwalk chwalk) -+int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, -+ struct hrtimer_sleeper *timeout, -+ enum rtmutex_chainwalk chwalk, -+ struct rt_mutex_waiter *waiter) - { -- struct rt_mutex_waiter waiter; -- unsigned long flags; -- int ret = 0; -- -- rt_mutex_init_waiter(&waiter); -- -- /* -- * Technically we could use raw_spin_[un]lock_irq() here, but this can -- * be called in early boot if the cmpxchg() fast path is disabled -- * (debug, no architecture support). In this case we will acquire the -- * rtmutex with lock->wait_lock held. But we cannot unconditionally -- * enable interrupts in that early boot case. So we need to use the -- * irqsave/restore variants. -- */ -- raw_spin_lock_irqsave(&lock->wait_lock, flags); -+ int ret; - - /* Try to acquire the lock again: */ -- if (try_to_take_rt_mutex(lock, current, NULL)) { -- raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -+ if (try_to_take_rt_mutex(lock, current, NULL)) - return 0; -- } - - set_current_state(state); - -@@ -1280,16 +1261,16 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, - if (unlikely(timeout)) - hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); - -- ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); -+ ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk); - - if (likely(!ret)) - /* sleep on the mutex */ -- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); -+ ret = __rt_mutex_slowlock(lock, state, timeout, waiter); - - if (unlikely(ret)) { - __set_current_state(TASK_RUNNING); -- remove_waiter(lock, &waiter); -- rt_mutex_handle_deadlock(ret, chwalk, &waiter); -+ remove_waiter(lock, waiter); -+ rt_mutex_handle_deadlock(ret, chwalk, waiter); - } - - /* -@@ -1297,6 +1278,34 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, - * unconditionally. 
We might have to fix that up. - */ - fixup_rt_mutex_waiters(lock); -+ return ret; -+} -+ -+/* -+ * Slow path lock function: -+ */ -+static int __sched -+rt_mutex_slowlock(struct rt_mutex *lock, int state, -+ struct hrtimer_sleeper *timeout, -+ enum rtmutex_chainwalk chwalk) -+{ -+ struct rt_mutex_waiter waiter; -+ unsigned long flags; -+ int ret = 0; -+ -+ rt_mutex_init_waiter(&waiter); -+ -+ /* -+ * Technically we could use raw_spin_[un]lock_irq() here, but this can -+ * be called in early boot if the cmpxchg() fast path is disabled -+ * (debug, no architecture support). In this case we will acquire the -+ * rtmutex with lock->wait_lock held. But we cannot unconditionally -+ * enable interrupts in that early boot case. So we need to use the -+ * irqsave/restore variants. -+ */ -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); -+ -+ ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, &waiter); - - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - -diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h -index 461527f3f7af..cb9815f0c766 100644 ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -15,6 +15,7 @@ - - #include - #include -+#include - - /* - * This is the control structure for tasks blocked on a rt_mutex, -@@ -159,6 +160,12 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, - struct wake_q_head *wqh); - - extern void rt_mutex_postunlock(struct wake_q_head *wake_q); -+/* RW semaphore special interface */ -+ -+int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, -+ struct hrtimer_sleeper *timeout, -+ enum rtmutex_chainwalk chwalk, -+ struct rt_mutex_waiter *waiter); - - #ifdef CONFIG_DEBUG_RT_MUTEXES - # include "rtmutex-debug.h" --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch b/kernel/patches-4.19.x-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch deleted file mode 100644 index 85a1d1d01..000000000 --- a/kernel/patches-4.19.x-rt/0142-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch +++ /dev/null @@ -1,152 +0,0 @@ -From e6cb2f5352cc7339044142820ecdc55d45f97ce5 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 12 Oct 2017 16:36:39 +0200 -Subject: [PATCH 142/328] rtmutex: export lockdep-less version of rt_mutex's - lock, trylock and unlock - -Required for lock implementation ontop of rtmutex. 
- -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rtmutex.c | 67 +++++++++++++++++++++------------ - kernel/locking/rtmutex_common.h | 3 ++ - 2 files changed, 46 insertions(+), 24 deletions(-) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 921345c31161..d732976d0f05 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -1494,12 +1494,33 @@ rt_mutex_fastunlock(struct rt_mutex *lock, - rt_mutex_postunlock(&wake_q); - } - --static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) -+int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state) - { - might_sleep(); -+ return rt_mutex_fastlock(lock, state, rt_mutex_slowlock); -+} -+ -+/** -+ * rt_mutex_lock_state - lock a rt_mutex with a given state -+ * -+ * @lock: The rt_mutex to be locked -+ * @state: The state to set when blocking on the rt_mutex -+ */ -+static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock, -+ unsigned int subclass, int state) -+{ -+ int ret; - - mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); -+ ret = __rt_mutex_lock_state(lock, state); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+ -+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) -+{ -+ rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE); - } - - #ifdef CONFIG_DEBUG_LOCK_ALLOC -@@ -1540,16 +1561,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); - */ - int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) - { -- int ret; -- -- might_sleep(); -- -- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); -- ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); -- if (ret) -- mutex_release(&lock->dep_map, 1, _RET_IP_); -- -- return ret; -+ return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE); - } - EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); - -@@ -1575,13 +1587,10 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) - * Returns: - * 0 on success - * -EINTR when interrupted by a signal -- * -EDEADLK when the lock would deadlock (when deadlock detection is on) - */ - int __sched rt_mutex_lock_killable(struct rt_mutex *lock) - { -- might_sleep(); -- -- return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock); -+ return rt_mutex_lock_state(lock, 0, TASK_KILLABLE); - } - EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); - -@@ -1616,6 +1625,18 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) - } - EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); - -+int __sched __rt_mutex_trylock(struct rt_mutex *lock) -+{ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (WARN_ON_ONCE(in_irq() || in_nmi())) -+#else -+ if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) -+#endif -+ return 0; -+ -+ return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); -+} -+ - /** - * rt_mutex_trylock - try to lock a rt_mutex - * -@@ -1631,14 +1652,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) - { - int ret; - --#ifdef CONFIG_PREEMPT_RT_FULL -- if (WARN_ON_ONCE(in_irq() || in_nmi())) --#else -- if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) --#endif -- return 0; -- -- ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); -+ ret = __rt_mutex_trylock(lock); - if (ret) - mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); - -@@ -1646,6 +1660,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) - } - EXPORT_SYMBOL_GPL(rt_mutex_trylock); - -+void __sched 
__rt_mutex_unlock(struct rt_mutex *lock) -+{ -+ rt_mutex_fastunlock(lock, rt_mutex_slowunlock); -+} -+ - /** - * rt_mutex_unlock - unlock a rt_mutex - * -diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h -index cb9815f0c766..5955ad2aa2a8 100644 ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -162,6 +162,9 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, - extern void rt_mutex_postunlock(struct wake_q_head *wake_q); - /* RW semaphore special interface */ - -+extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state); -+extern int __rt_mutex_trylock(struct rt_mutex *lock); -+extern void __rt_mutex_unlock(struct rt_mutex *lock); - int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk, --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0143-rtmutex-add-sleeping-lock-implementation.patch b/kernel/patches-4.19.x-rt/0143-rtmutex-add-sleeping-lock-implementation.patch deleted file mode 100644 index e5e418450..000000000 --- a/kernel/patches-4.19.x-rt/0143-rtmutex-add-sleeping-lock-implementation.patch +++ /dev/null @@ -1,1207 +0,0 @@ -From 4972c101326c6e36a9f541deeab9b56e3bef9932 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 12 Oct 2017 17:11:19 +0200 -Subject: [PATCH 143/328] rtmutex: add sleeping lock implementation - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/kernel.h | 4 + - include/linux/rtmutex.h | 21 +- - include/linux/sched.h | 8 + - include/linux/sched/wake_q.h | 27 +- - include/linux/spinlock_rt.h | 156 +++++++++++ - include/linux/spinlock_types_rt.h | 48 ++++ - kernel/fork.c | 1 + - kernel/futex.c | 11 +- - kernel/locking/rtmutex.c | 436 +++++++++++++++++++++++++++--- - kernel/locking/rtmutex_common.h | 14 +- - kernel/sched/core.c | 28 +- - 11 files changed, 695 insertions(+), 59 deletions(-) - create mode 100644 include/linux/spinlock_rt.h - create mode 100644 include/linux/spinlock_types_rt.h - -diff --git a/include/linux/kernel.h b/include/linux/kernel.h -index f6f94e54ab96..78f30d553037 100644 ---- a/include/linux/kernel.h -+++ b/include/linux/kernel.h -@@ -260,6 +260,9 @@ extern int _cond_resched(void); - */ - # define might_sleep() \ - do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) -+ -+# define might_sleep_no_state_check() \ -+ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) - # define sched_annotate_sleep() (current->task_state_change = 0) - #else - static inline void ___might_sleep(const char *file, int line, -@@ -267,6 +270,7 @@ extern int _cond_resched(void); - static inline void __might_sleep(const char *file, int line, - int preempt_offset) { } - # define might_sleep() do { might_resched(); } while (0) -+# define might_sleep_no_state_check() do { might_resched(); } while (0) - # define sched_annotate_sleep() do { } while (0) - #endif - -diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h -index a355289b1fa1..138bd1e183e0 100644 ---- a/include/linux/rtmutex.h -+++ b/include/linux/rtmutex.h -@@ -14,11 +14,15 @@ - #define __LINUX_RT_MUTEX_H - - #include --#include - #include -+#include - - extern int max_lock_depth; /* for sysctl */ - -+#ifdef CONFIG_DEBUG_MUTEXES -+#include -+#endif -+ - /** - * The rt_mutex structure - * -@@ -31,8 +35,8 @@ struct rt_mutex { - raw_spinlock_t wait_lock; - struct rb_root_cached waiters; - struct task_struct *owner; --#ifdef CONFIG_DEBUG_RT_MUTEXES - int 
save_state; -+#ifdef CONFIG_DEBUG_RT_MUTEXES - const char *name, *file; - int line; - void *magic; -@@ -82,16 +86,23 @@ do { \ - #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) - #endif - --#define __RT_MUTEX_INITIALIZER(mutexname) \ -- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ -+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .waiters = RB_ROOT_CACHED \ - , .owner = NULL \ - __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ -- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} -+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) -+ -+#define __RT_MUTEX_INITIALIZER(mutexname) \ -+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } - - #define DEFINE_RT_MUTEX(mutexname) \ - struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) - -+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ -+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ -+ , .save_state = 1 } -+ - /** - * rt_mutex_is_locked - is the mutex locked - * @lock: the mutex to be queried -diff --git a/include/linux/sched.h b/include/linux/sched.h -index f4ff928e6be3..527d04f9163e 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -134,6 +134,9 @@ struct task_group; - smp_store_mb(current->state, (state_value)); \ - } while (0) - -+#define __set_current_state_no_track(state_value) \ -+ current->state = (state_value); -+ - #define set_special_state(state_value) \ - do { \ - unsigned long flags; /* may shadow */ \ -@@ -143,6 +146,7 @@ struct task_group; - current->state = (state_value); \ - raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ - } while (0) -+ - #else - /* - * set_current_state() includes a barrier so that the write of current->state -@@ -187,6 +191,9 @@ struct task_group; - #define set_current_state(state_value) \ - smp_store_mb(current->state, (state_value)) - -+#define __set_current_state_no_track(state_value) \ -+ __set_current_state(state_value) -+ - /* - * set_special_state() should be used for those states when the blocking task - * can not use the regular condition based wait-loop. 
In that case we must -@@ -914,6 +921,7 @@ struct task_struct { - raw_spinlock_t pi_lock; - - struct wake_q_node wake_q; -+ struct wake_q_node wake_q_sleeper; - - #ifdef CONFIG_RT_MUTEXES - /* PI waiters blocked on a rt_mutex held by this task: */ -diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h -index 10b19a192b2d..ce3ccff3d9d8 100644 ---- a/include/linux/sched/wake_q.h -+++ b/include/linux/sched/wake_q.h -@@ -47,8 +47,29 @@ static inline void wake_q_init(struct wake_q_head *head) - head->lastp = &head->first; - } - --extern void wake_q_add(struct wake_q_head *head, -- struct task_struct *task); --extern void wake_up_q(struct wake_q_head *head); -+extern void __wake_q_add(struct wake_q_head *head, -+ struct task_struct *task, bool sleeper); -+static inline void wake_q_add(struct wake_q_head *head, -+ struct task_struct *task) -+{ -+ __wake_q_add(head, task, false); -+} -+ -+static inline void wake_q_add_sleeper(struct wake_q_head *head, -+ struct task_struct *task) -+{ -+ __wake_q_add(head, task, true); -+} -+ -+extern void __wake_up_q(struct wake_q_head *head, bool sleeper); -+static inline void wake_up_q(struct wake_q_head *head) -+{ -+ __wake_up_q(head, false); -+} -+ -+static inline void wake_up_q_sleeper(struct wake_q_head *head) -+{ -+ __wake_up_q(head, true); -+} - - #endif /* _LINUX_SCHED_WAKE_Q_H */ -diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h -new file mode 100644 -index 000000000000..3696a77fa77d ---- /dev/null -+++ b/include/linux/spinlock_rt.h -@@ -0,0 +1,156 @@ -+#ifndef __LINUX_SPINLOCK_RT_H -+#define __LINUX_SPINLOCK_RT_H -+ -+#ifndef __LINUX_SPINLOCK_H -+#error Do not include directly. Use spinlock.h -+#endif -+ -+#include -+ -+extern void -+__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key); -+ -+#define spin_lock_init(slock) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(slock)->lock); \ -+ __rt_spin_lock_init(slock, #slock, &__key); \ -+} while (0) -+ -+extern void __lockfunc rt_spin_lock(spinlock_t *lock); -+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); -+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); -+extern void __lockfunc rt_spin_unlock(spinlock_t *lock); -+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); -+extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); -+extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); -+extern int __lockfunc rt_spin_trylock(spinlock_t *lock); -+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); -+ -+/* -+ * lockdep-less calls, for derived types like rwlock: -+ * (for trylock they can use rt_mutex_trylock() directly. -+ * Migrate disable handling must be done at the call site. 
-+ */ -+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); -+extern void __lockfunc __rt_spin_trylock(struct rt_mutex *lock); -+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); -+ -+#define spin_lock(lock) rt_spin_lock(lock) -+ -+#define spin_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_spin_lock(lock); \ -+ } while (0) -+ -+#define spin_lock_irq(lock) spin_lock(lock) -+ -+#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) -+ -+#define spin_trylock(lock) \ -+({ \ -+ int __locked; \ -+ __locked = spin_do_trylock(lock); \ -+ __locked; \ -+}) -+ -+#ifdef CONFIG_LOCKDEP -+# define spin_lock_nested(lock, subclass) \ -+ do { \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+ -+#define spin_lock_bh_nested(lock, subclass) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+ -+# define spin_lock_irqsave_nested(lock, flags, subclass) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+#else -+# define spin_lock_nested(lock, subclass) spin_lock(lock) -+# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock) -+ -+# define spin_lock_irqsave_nested(lock, flags, subclass) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ spin_lock(lock); \ -+ } while (0) -+#endif -+ -+#define spin_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ spin_lock(lock); \ -+ } while (0) -+ -+static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) -+{ -+ unsigned long flags = 0; -+#ifdef CONFIG_TRACE_IRQFLAGS -+ flags = rt_spin_lock_trace_flags(lock); -+#else -+ spin_lock(lock); /* lock_local */ -+#endif -+ return flags; -+} -+ -+/* FIXME: we need rt_spin_lock_nest_lock */ -+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) -+ -+#define spin_unlock(lock) rt_spin_unlock(lock) -+ -+#define spin_unlock_bh(lock) \ -+ do { \ -+ rt_spin_unlock(lock); \ -+ local_bh_enable(); \ -+ } while (0) -+ -+#define spin_unlock_irq(lock) spin_unlock(lock) -+ -+#define spin_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ spin_unlock(lock); \ -+ } while (0) -+ -+#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) -+#define spin_trylock_irq(lock) spin_trylock(lock) -+ -+#define spin_trylock_irqsave(lock, flags) \ -+ rt_spin_trylock_irqsave(lock, &(flags)) -+ -+#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) -+ -+#ifdef CONFIG_GENERIC_LOCKBREAK -+# define spin_is_contended(lock) ((lock)->break_lock) -+#else -+# define spin_is_contended(lock) (((void)(lock), 0)) -+#endif -+ -+static inline int spin_can_lock(spinlock_t *lock) -+{ -+ return !rt_mutex_is_locked(&lock->lock); -+} -+ -+static inline int spin_is_locked(spinlock_t *lock) -+{ -+ return rt_mutex_is_locked(&lock->lock); -+} -+ -+static inline void assert_spin_locked(spinlock_t *lock) -+{ -+ BUG_ON(!spin_is_locked(lock)); -+} -+ -+#endif -diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h -new file mode 100644 -index 000000000000..3e3d8c5f7a9a ---- /dev/null -+++ b/include/linux/spinlock_types_rt.h -@@ -0,0 +1,48 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_RT_H -+#define __LINUX_SPINLOCK_TYPES_RT_H -+ -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. 
Include spinlock_types.h instead" -+#endif -+ -+#include -+ -+/* -+ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: -+ */ -+typedef struct spinlock { -+ struct rt_mutex lock; -+ unsigned int break_lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+} spinlock_t; -+ -+#ifdef CONFIG_DEBUG_RT_MUTEXES -+# define __RT_SPIN_INITIALIZER(name) \ -+ { \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ -+ .save_state = 1, \ -+ .file = __FILE__, \ -+ .line = __LINE__ , \ -+ } -+#else -+# define __RT_SPIN_INITIALIZER(name) \ -+ { \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ -+ .save_state = 1, \ -+ } -+#endif -+ -+/* -+.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) -+*/ -+ -+#define __SPIN_LOCK_UNLOCKED(name) \ -+ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ -+ SPIN_DEP_MAP_INIT(name) } -+ -+#define DEFINE_SPINLOCK(name) \ -+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) -+ -+#endif -diff --git a/kernel/fork.c b/kernel/fork.c -index 29b54a64daf5..ecec0f8bef7e 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -900,6 +900,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) - tsk->splice_pipe = NULL; - tsk->task_frag.page = NULL; - tsk->wake_q.next = NULL; -+ tsk->wake_q_sleeper.next = NULL; - - account_kernel_stack(tsk, 1); - -diff --git a/kernel/futex.c b/kernel/futex.c -index 60be4530c767..4c448dddce3c 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -1479,6 +1479,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ - struct task_struct *new_owner; - bool postunlock = false; - DEFINE_WAKE_Q(wake_q); -+ DEFINE_WAKE_Q(wake_sleeper_q); - int ret = 0; - - new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); -@@ -1538,13 +1539,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ - pi_state->owner = new_owner; - raw_spin_unlock(&new_owner->pi_lock); - -- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); -- -+ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q, -+ &wake_sleeper_q); - out_unlock: - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - - if (postunlock) -- rt_mutex_postunlock(&wake_q); -+ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); - - return ret; - } -@@ -2873,7 +2874,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - goto no_block; - } - -- rt_mutex_init_waiter(&rt_waiter); -+ rt_mutex_init_waiter(&rt_waiter, false); - - /* - * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not -@@ -3266,7 +3267,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * The waiter is allocated on our stack, manipulated by the requeue - * code while we sleep on uaddr. 
- */ -- rt_mutex_init_waiter(&rt_waiter); -+ rt_mutex_init_waiter(&rt_waiter, false); - - ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); - if (unlikely(ret != 0)) -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index d732976d0f05..88df1ff7ca2d 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -7,6 +7,11 @@ - * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner - * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt - * Copyright (C) 2006 Esben Nielsen -+ * Adaptive Spinlocks: -+ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, -+ * and Peter Morreale, -+ * Adaptive Spinlocks simplification: -+ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt - * - * See Documentation/locking/rt-mutex-design.txt for details. - */ -@@ -234,7 +239,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, - * Only use with rt_mutex_waiter_{less,equal}() - */ - #define task_to_waiter(p) \ -- &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } -+ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) } - - static inline int - rt_mutex_waiter_less(struct rt_mutex_waiter *left, -@@ -274,6 +279,27 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left, - return 1; - } - -+#define STEAL_NORMAL 0 -+#define STEAL_LATERAL 1 -+ -+static inline int -+rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode) -+{ -+ struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); -+ -+ if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter)) -+ return 1; -+ -+ /* -+ * Note that RT tasks are excluded from lateral-steals -+ * to prevent the introduction of an unbounded latency. -+ */ -+ if (mode == STEAL_NORMAL || rt_task(waiter->task)) -+ return 0; -+ -+ return rt_mutex_waiter_equal(waiter, top_waiter); -+} -+ - static void - rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) - { -@@ -378,6 +404,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, - return debug_rt_mutex_detect_deadlock(waiter, chwalk); - } - -+static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) -+{ -+ if (waiter->savestate) -+ wake_up_lock_sleeper(waiter->task); -+ else -+ wake_up_process(waiter->task); -+} -+ - /* - * Max number of times we'll walk the boosting chain: - */ -@@ -703,13 +737,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, - * follow here. This is the end of the chain we are walking. - */ - if (!rt_mutex_owner(lock)) { -+ struct rt_mutex_waiter *lock_top_waiter; -+ - /* - * If the requeue [7] above changed the top waiter, - * then we need to wake the new top waiter up to try - * to get the lock. 
- */ -- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) -- wake_up_process(rt_mutex_top_waiter(lock)->task); -+ lock_top_waiter = rt_mutex_top_waiter(lock); -+ if (prerequeue_top_waiter != lock_top_waiter) -+ rt_mutex_wake_waiter(lock_top_waiter); - raw_spin_unlock_irq(&lock->wait_lock); - return 0; - } -@@ -811,9 +848,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, - * @task: The task which wants to acquire the lock - * @waiter: The waiter that is queued to the lock's wait tree if the - * callsite called task_blocked_on_lock(), otherwise NULL -+ * @mode: Lock steal mode (STEAL_NORMAL, STEAL_LATERAL) - */ --static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, -- struct rt_mutex_waiter *waiter) -+static int __try_to_take_rt_mutex(struct rt_mutex *lock, -+ struct task_struct *task, -+ struct rt_mutex_waiter *waiter, int mode) - { - lockdep_assert_held(&lock->wait_lock); - -@@ -849,12 +888,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - */ - if (waiter) { - /* -- * If waiter is not the highest priority waiter of -- * @lock, give up. -+ * If waiter is not the highest priority waiter of @lock, -+ * or its peer when lateral steal is allowed, give up. - */ -- if (waiter != rt_mutex_top_waiter(lock)) -+ if (!rt_mutex_steal(lock, waiter, mode)) - return 0; -- - /* - * We can acquire the lock. Remove the waiter from the - * lock waiters tree. -@@ -872,14 +910,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - */ - if (rt_mutex_has_waiters(lock)) { - /* -- * If @task->prio is greater than or equal to -- * the top waiter priority (kernel view), -- * @task lost. -+ * If @task->prio is greater than the top waiter -+ * priority (kernel view), or equal to it when a -+ * lateral steal is forbidden, @task lost. - */ -- if (!rt_mutex_waiter_less(task_to_waiter(task), -- rt_mutex_top_waiter(lock))) -+ if (!rt_mutex_steal(lock, task_to_waiter(task), mode)) - return 0; -- - /* - * The current top waiter stays enqueued. We - * don't have to change anything in the lock -@@ -926,6 +962,296 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - return 1; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * preemptible spin_lock functions: -+ */ -+static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, -+ void (*slowfn)(struct rt_mutex *lock)) -+{ -+ might_sleep_no_state_check(); -+ -+ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) -+ return; -+ else -+ slowfn(lock); -+} -+ -+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, -+ void (*slowfn)(struct rt_mutex *lock)) -+{ -+ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) -+ return; -+ else -+ slowfn(lock); -+} -+#ifdef CONFIG_SMP -+/* -+ * Note that owner is a speculative pointer and dereferencing relies -+ * on rcu_read_lock() and the check against the lock owner. -+ */ -+static int adaptive_wait(struct rt_mutex *lock, -+ struct task_struct *owner) -+{ -+ int res = 0; -+ -+ rcu_read_lock(); -+ for (;;) { -+ if (owner != rt_mutex_owner(lock)) -+ break; -+ /* -+ * Ensure that owner->on_cpu is dereferenced _after_ -+ * checking the above to be valid. 
-+ */ -+ barrier(); -+ if (!owner->on_cpu) { -+ res = 1; -+ break; -+ } -+ cpu_relax(); -+ } -+ rcu_read_unlock(); -+ return res; -+} -+#else -+static int adaptive_wait(struct rt_mutex *lock, -+ struct task_struct *orig_owner) -+{ -+ return 1; -+} -+#endif -+ -+static int task_blocks_on_rt_mutex(struct rt_mutex *lock, -+ struct rt_mutex_waiter *waiter, -+ struct task_struct *task, -+ enum rtmutex_chainwalk chwalk); -+/* -+ * Slow path lock function spin_lock style: this variant is very -+ * careful not to miss any non-lock wakeups. -+ * -+ * We store the current state under p->pi_lock in p->saved_state and -+ * the try_to_wake_up() code handles this accordingly. -+ */ -+void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock, -+ struct rt_mutex_waiter *waiter, -+ unsigned long flags) -+{ -+ struct task_struct *lock_owner, *self = current; -+ struct rt_mutex_waiter *top_waiter; -+ int ret; -+ -+ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) -+ return; -+ -+ BUG_ON(rt_mutex_owner(lock) == self); -+ -+ /* -+ * We save whatever state the task is in and we'll restore it -+ * after acquiring the lock taking real wakeups into account -+ * as well. We are serialized via pi_lock against wakeups. See -+ * try_to_wake_up(). -+ */ -+ raw_spin_lock(&self->pi_lock); -+ self->saved_state = self->state; -+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); -+ raw_spin_unlock(&self->pi_lock); -+ -+ ret = task_blocks_on_rt_mutex(lock, waiter, self, RT_MUTEX_MIN_CHAINWALK); -+ BUG_ON(ret); -+ -+ for (;;) { -+ /* Try to acquire the lock again. */ -+ if (__try_to_take_rt_mutex(lock, self, waiter, STEAL_LATERAL)) -+ break; -+ -+ top_waiter = rt_mutex_top_waiter(lock); -+ lock_owner = rt_mutex_owner(lock); -+ -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -+ -+ debug_rt_mutex_print_deadlock(waiter); -+ -+ if (top_waiter != waiter || adaptive_wait(lock, lock_owner)) -+ schedule(); -+ -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); -+ -+ raw_spin_lock(&self->pi_lock); -+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); -+ raw_spin_unlock(&self->pi_lock); -+ } -+ -+ /* -+ * Restore the task state to current->saved_state. We set it -+ * to the original state above and the try_to_wake_up() code -+ * has possibly updated it when a real (non-rtmutex) wakeup -+ * happened while we were blocked. Clear saved_state so -+ * try_to_wakeup() does not get confused. -+ */ -+ raw_spin_lock(&self->pi_lock); -+ __set_current_state_no_track(self->saved_state); -+ self->saved_state = TASK_RUNNING; -+ raw_spin_unlock(&self->pi_lock); -+ -+ /* -+ * try_to_take_rt_mutex() sets the waiter bit -+ * unconditionally. 
We might have to fix that up: -+ */ -+ fixup_rt_mutex_waiters(lock); -+ -+ BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock)); -+ BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry)); -+} -+ -+static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) -+{ -+ struct rt_mutex_waiter waiter; -+ unsigned long flags; -+ -+ rt_mutex_init_waiter(&waiter, true); -+ -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); -+ rt_spin_lock_slowlock_locked(lock, &waiter, flags); -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -+ debug_rt_mutex_free_waiter(&waiter); -+} -+ -+static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock, -+ struct wake_q_head *wake_q, -+ struct wake_q_head *wq_sleeper); -+/* -+ * Slow path to release a rt_mutex spin_lock style -+ */ -+void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) -+{ -+ unsigned long flags; -+ DEFINE_WAKE_Q(wake_q); -+ DEFINE_WAKE_Q(wake_sleeper_q); -+ bool postunlock; -+ -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); -+ postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q); -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -+ -+ if (postunlock) -+ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); -+} -+ -+void __lockfunc rt_spin_lock(spinlock_t *lock) -+{ -+ migrate_disable(); -+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); -+} -+EXPORT_SYMBOL(rt_spin_lock); -+ -+void __lockfunc __rt_spin_lock(struct rt_mutex *lock) -+{ -+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); -+} -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) -+{ -+ migrate_disable(); -+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); -+} -+EXPORT_SYMBOL(rt_spin_lock_nested); -+#endif -+ -+void __lockfunc rt_spin_unlock(spinlock_t *lock) -+{ -+ /* NOTE: we always pass in '1' for nested, for simplicity */ -+ spin_release(&lock->dep_map, 1, _RET_IP_); -+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); -+ migrate_enable(); -+} -+EXPORT_SYMBOL(rt_spin_unlock); -+ -+void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) -+{ -+ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); -+} -+EXPORT_SYMBOL(__rt_spin_unlock); -+ -+/* -+ * Wait for the lock to get unlocked: instead of polling for an unlock -+ * (like raw spinlocks do), we lock and unlock, to force the kernel to -+ * schedule if there's contention: -+ */ -+void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) -+{ -+ spin_lock(lock); -+ spin_unlock(lock); -+} -+EXPORT_SYMBOL(rt_spin_unlock_wait); -+ -+int __lockfunc rt_spin_trylock(spinlock_t *lock) -+{ -+ int ret; -+ -+ migrate_disable(); -+ ret = __rt_mutex_trylock(&lock->lock); -+ if (ret) -+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ else -+ migrate_enable(); -+ return ret; -+} -+EXPORT_SYMBOL(rt_spin_trylock); -+ -+int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) -+{ -+ int ret; -+ -+ local_bh_disable(); -+ ret = __rt_mutex_trylock(&lock->lock); -+ if (ret) { -+ migrate_disable(); -+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ } else -+ local_bh_enable(); -+ return ret; -+} -+EXPORT_SYMBOL(rt_spin_trylock_bh); -+ -+int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) -+{ -+ int ret; -+ -+ *flags = 0; -+ ret = __rt_mutex_trylock(&lock->lock); -+ if (ret) { -+ migrate_disable(); -+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ } -+ return ret; -+} 
-+EXPORT_SYMBOL(rt_spin_trylock_irqsave); -+ -+void -+__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held lock: -+ */ -+ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); -+ lockdep_init_map(&lock->dep_map, name, key, 0); -+#endif -+} -+EXPORT_SYMBOL(__rt_spin_lock_init); -+ -+#endif /* PREEMPT_RT_FULL */ -+ -+static inline int -+try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, -+ struct rt_mutex_waiter *waiter) -+{ -+ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); -+} -+ - /* - * Task blocks on lock. - * -@@ -1039,6 +1365,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - * Called with lock->wait_lock held and interrupts disabled. - */ - static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q, - struct rt_mutex *lock) - { - struct rt_mutex_waiter *waiter; -@@ -1078,7 +1405,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, - * Pairs with preempt_enable() in rt_mutex_postunlock(); - */ - preempt_disable(); -- wake_q_add(wake_q, waiter->task); -+ if (waiter->savestate) -+ wake_q_add_sleeper(wake_sleeper_q, waiter->task); -+ else -+ wake_q_add(wake_q, waiter->task); - raw_spin_unlock(¤t->pi_lock); - } - -@@ -1162,21 +1492,22 @@ void rt_mutex_adjust_pi(struct task_struct *task) - return; - } - next_lock = waiter->lock; -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); - - /* gets dropped in rt_mutex_adjust_prio_chain()! */ - get_task_struct(task); - -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); - rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, - next_lock, NULL, task); - } - --void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) -+void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) - { - debug_rt_mutex_init_waiter(waiter); - RB_CLEAR_NODE(&waiter->pi_tree_entry); - RB_CLEAR_NODE(&waiter->tree_entry); - waiter->task = NULL; -+ waiter->savestate = savestate; - } - - /** -@@ -1293,7 +1624,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, - unsigned long flags; - int ret = 0; - -- rt_mutex_init_waiter(&waiter); -+ rt_mutex_init_waiter(&waiter, false); - - /* - * Technically we could use raw_spin_[un]lock_irq() here, but this can -@@ -1366,7 +1697,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) - * Return whether the current task needs to call rt_mutex_postunlock(). - */ - static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, -- struct wake_q_head *wake_q) -+ struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q) - { - unsigned long flags; - -@@ -1420,7 +1752,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, - * - * Queue the next waiter for wakeup once we release the wait_lock. - */ -- mark_wakeup_next_waiter(wake_q, lock); -+ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock); - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - - return true; /* call rt_mutex_postunlock() */ -@@ -1472,9 +1804,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, - /* - * Performs the wakeup of the the top-waiter and re-enables preemption. 
- */ --void rt_mutex_postunlock(struct wake_q_head *wake_q) -+void rt_mutex_postunlock(struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q) - { - wake_up_q(wake_q); -+ wake_up_q_sleeper(wake_sleeper_q); - - /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ - preempt_enable(); -@@ -1483,15 +1817,17 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q) - static inline void - rt_mutex_fastunlock(struct rt_mutex *lock, - bool (*slowfn)(struct rt_mutex *lock, -- struct wake_q_head *wqh)) -+ struct wake_q_head *wqh, -+ struct wake_q_head *wq_sleeper)) - { - DEFINE_WAKE_Q(wake_q); -+ DEFINE_WAKE_Q(wake_sleeper_q); - - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) - return; - -- if (slowfn(lock, &wake_q)) -- rt_mutex_postunlock(&wake_q); -+ if (slowfn(lock, &wake_q, &wake_sleeper_q)) -+ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); - } - - int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state) -@@ -1673,16 +2009,13 @@ void __sched __rt_mutex_unlock(struct rt_mutex *lock) - void __sched rt_mutex_unlock(struct rt_mutex *lock) - { - mutex_release(&lock->dep_map, 1, _RET_IP_); -- rt_mutex_fastunlock(lock, rt_mutex_slowunlock); -+ __rt_mutex_unlock(lock); - } - EXPORT_SYMBOL_GPL(rt_mutex_unlock); - --/** -- * Futex variant, that since futex variants do not use the fast-path, can be -- * simple and will not need to retry. -- */ --bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, -- struct wake_q_head *wake_q) -+static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock, -+ struct wake_q_head *wake_q, -+ struct wake_q_head *wq_sleeper) - { - lockdep_assert_held(&lock->wait_lock); - -@@ -1699,23 +2032,35 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, - * avoid inversion prior to the wakeup. preempt_disable() - * therein pairs with rt_mutex_postunlock(). - */ -- mark_wakeup_next_waiter(wake_q, lock); -+ mark_wakeup_next_waiter(wake_q, wq_sleeper, lock); - - return true; /* call postunlock() */ - } - -+/** -+ * Futex variant, that since futex variants do not use the fast-path, can be -+ * simple and will not need to retry. -+ */ -+bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, -+ struct wake_q_head *wake_q, -+ struct wake_q_head *wq_sleeper) -+{ -+ return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper); -+} -+ - void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) - { - DEFINE_WAKE_Q(wake_q); -+ DEFINE_WAKE_Q(wake_sleeper_q); - unsigned long flags; - bool postunlock; - - raw_spin_lock_irqsave(&lock->wait_lock, flags); -- postunlock = __rt_mutex_futex_unlock(lock, &wake_q); -+ postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q); - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - - if (postunlock) -- rt_mutex_postunlock(&wake_q); -+ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); - } - - /** -@@ -1754,7 +2099,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name, - if (name && key) - debug_rt_mutex_init(lock, name, key); - } --EXPORT_SYMBOL_GPL(__rt_mutex_init); -+EXPORT_SYMBOL(__rt_mutex_init); - - /** - * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a -@@ -1949,6 +2294,7 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *to, - struct rt_mutex_waiter *waiter) - { -+ struct task_struct *tsk = current; - int ret; - - raw_spin_lock_irq(&lock->wait_lock); -@@ -1960,6 +2306,24 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, - * have to fix that up. 
- */ - fixup_rt_mutex_waiters(lock); -+ /* -+ * RT has a problem here when the wait got interrupted by a timeout -+ * or a signal. task->pi_blocked_on is still set. The task must -+ * acquire the hash bucket lock when returning from this function. -+ * -+ * If the hash bucket lock is contended then the -+ * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in -+ * task_blocks_on_rt_mutex() will trigger. This can be avoided by -+ * clearing task->pi_blocked_on which removes the task from the -+ * boosting chain of the rtmutex. That's correct because the task -+ * is not longer blocked on it. -+ */ -+ if (ret) { -+ raw_spin_lock(&tsk->pi_lock); -+ tsk->pi_blocked_on = NULL; -+ raw_spin_unlock(&tsk->pi_lock); -+ } -+ - raw_spin_unlock_irq(&lock->wait_lock); - - return ret; -diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h -index 5955ad2aa2a8..6fcf0a3e180d 100644 ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -30,6 +30,7 @@ struct rt_mutex_waiter { - struct rb_node pi_tree_entry; - struct task_struct *task; - struct rt_mutex *lock; -+ bool savestate; - #ifdef CONFIG_DEBUG_RT_MUTEXES - unsigned long ip; - struct pid *deadlock_task_pid; -@@ -139,7 +140,7 @@ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner); - extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, - struct task_struct *proxy_owner); --extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); -+extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savetate); - extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, - struct task_struct *task); -@@ -157,9 +158,12 @@ extern int __rt_mutex_futex_trylock(struct rt_mutex *l); - - extern void rt_mutex_futex_unlock(struct rt_mutex *lock); - extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, -- struct wake_q_head *wqh); -+ struct wake_q_head *wqh, -+ struct wake_q_head *wq_sleeper); -+ -+extern void rt_mutex_postunlock(struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q); - --extern void rt_mutex_postunlock(struct wake_q_head *wake_q); - /* RW semaphore special interface */ - - extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state); -@@ -169,6 +173,10 @@ int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk, - struct rt_mutex_waiter *waiter); -+void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock, -+ struct rt_mutex_waiter *waiter, -+ unsigned long flags); -+void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock); - - #ifdef CONFIG_DEBUG_RT_MUTEXES - # include "rtmutex-debug.h" -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 42b42ebf52bc..6a0ccaea2b42 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -401,9 +401,15 @@ static bool set_nr_if_polling(struct task_struct *p) - #endif - #endif - --void wake_q_add(struct wake_q_head *head, struct task_struct *task) -+void __wake_q_add(struct wake_q_head *head, struct task_struct *task, -+ bool sleeper) - { -- struct wake_q_node *node = &task->wake_q; -+ struct wake_q_node *node; -+ -+ if (sleeper) -+ node = &task->wake_q_sleeper; -+ else -+ node = &task->wake_q; - - /* - * Atomically grab the task, if ->wake_q is !nil already it means -@@ -426,24 +432,32 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task) - head->lastp = &node->next; - } - --void wake_up_q(struct wake_q_head *head) -+void 
__wake_up_q(struct wake_q_head *head, bool sleeper) - { - struct wake_q_node *node = head->first; - - while (node != WAKE_Q_TAIL) { - struct task_struct *task; - -- task = container_of(node, struct task_struct, wake_q); -+ if (sleeper) -+ task = container_of(node, struct task_struct, wake_q_sleeper); -+ else -+ task = container_of(node, struct task_struct, wake_q); - BUG_ON(!task); - /* Task can safely be re-inserted now: */ - node = node->next; -- task->wake_q.next = NULL; -- -+ if (sleeper) -+ task->wake_q_sleeper.next = NULL; -+ else -+ task->wake_q.next = NULL; - /* - * wake_up_process() executes a full barrier, which pairs with - * the queueing in wake_q_add() so as not to miss wakeups. - */ -- wake_up_process(task); -+ if (sleeper) -+ wake_up_lock_sleeper(task); -+ else -+ wake_up_process(task); - put_task_struct(task); - } - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch b/kernel/patches-4.19.x-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch deleted file mode 100644 index 0bb0f434e..000000000 --- a/kernel/patches-4.19.x-rt/0144-rtmutex-add-mutex-implementation-based-on-rtmutex.patch +++ /dev/null @@ -1,382 +0,0 @@ -From ac391538d1f4dff13baf839ada70bb001c1462dd Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 12 Oct 2017 17:17:03 +0200 -Subject: [PATCH 144/328] rtmutex: add mutex implementation based on rtmutex - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/mutex_rt.h | 130 ++++++++++++++++++++++ - kernel/locking/mutex-rt.c | 223 ++++++++++++++++++++++++++++++++++++++ - 2 files changed, 353 insertions(+) - create mode 100644 include/linux/mutex_rt.h - create mode 100644 kernel/locking/mutex-rt.c - -diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h -new file mode 100644 -index 000000000000..3fcb5edb1d2b ---- /dev/null -+++ b/include/linux/mutex_rt.h -@@ -0,0 +1,130 @@ -+#ifndef __LINUX_MUTEX_RT_H -+#define __LINUX_MUTEX_RT_H -+ -+#ifndef __LINUX_MUTEX_H -+#error "Please include mutex.h" -+#endif -+ -+#include -+ -+/* FIXME: Just for __lockfunc */ -+#include -+ -+struct mutex { -+ struct rt_mutex lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+}; -+ -+#define __MUTEX_INITIALIZER(mutexname) \ -+ { \ -+ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ -+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ -+ } -+ -+#define DEFINE_MUTEX(mutexname) \ -+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) -+ -+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); -+extern void __lockfunc _mutex_lock(struct mutex *lock); -+extern void __lockfunc _mutex_lock_io(struct mutex *lock); -+extern void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass); -+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); -+extern int __lockfunc _mutex_lock_killable(struct mutex *lock); -+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); -+extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); -+extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); -+extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); -+extern int __lockfunc _mutex_trylock(struct mutex *lock); -+extern void __lockfunc _mutex_unlock(struct mutex *lock); -+ -+#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) -+#define mutex_lock(l) _mutex_lock(l) 
-+#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) -+#define mutex_lock_killable(l) _mutex_lock_killable(l) -+#define mutex_trylock(l) _mutex_trylock(l) -+#define mutex_unlock(l) _mutex_unlock(l) -+#define mutex_lock_io(l) _mutex_lock_io(l); -+ -+#define __mutex_owner(l) ((l)->lock.owner) -+ -+#ifdef CONFIG_DEBUG_MUTEXES -+#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) -+#else -+static inline void mutex_destroy(struct mutex *lock) {} -+#endif -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) -+# define mutex_lock_interruptible_nested(l, s) \ -+ _mutex_lock_interruptible_nested(l, s) -+# define mutex_lock_killable_nested(l, s) \ -+ _mutex_lock_killable_nested(l, s) -+# define mutex_lock_io_nested(l, s) _mutex_lock_io_nested(l, s) -+ -+# define mutex_lock_nest_lock(lock, nest_lock) \ -+do { \ -+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ -+ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ -+} while (0) -+ -+#else -+# define mutex_lock_nested(l, s) _mutex_lock(l) -+# define mutex_lock_interruptible_nested(l, s) \ -+ _mutex_lock_interruptible(l) -+# define mutex_lock_killable_nested(l, s) \ -+ _mutex_lock_killable(l) -+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) -+# define mutex_lock_io_nested(l, s) _mutex_lock_io(l) -+#endif -+ -+# define mutex_init(mutex) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(mutex)->lock); \ -+ __mutex_do_init((mutex), #mutex, &__key); \ -+} while (0) -+ -+# define __mutex_init(mutex, name, key) \ -+do { \ -+ rt_mutex_init(&(mutex)->lock); \ -+ __mutex_do_init((mutex), name, key); \ -+} while (0) -+ -+/** -+ * These values are chosen such that FAIL and SUCCESS match the -+ * values of the regular mutex_trylock(). -+ */ -+enum mutex_trylock_recursive_enum { -+ MUTEX_TRYLOCK_FAILED = 0, -+ MUTEX_TRYLOCK_SUCCESS = 1, -+ MUTEX_TRYLOCK_RECURSIVE, -+}; -+/** -+ * mutex_trylock_recursive - trylock variant that allows recursive locking -+ * @lock: mutex to be locked -+ * -+ * This function should not be used, _ever_. It is purely for hysterical GEM -+ * raisins, and once those are gone this will be removed. -+ * -+ * Returns: -+ * MUTEX_TRYLOCK_FAILED - trylock failed, -+ * MUTEX_TRYLOCK_SUCCESS - lock acquired, -+ * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. 
-+ */ -+int __rt_mutex_owner_current(struct rt_mutex *lock); -+ -+static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum -+mutex_trylock_recursive(struct mutex *lock) -+{ -+ if (unlikely(__rt_mutex_owner_current(&lock->lock))) -+ return MUTEX_TRYLOCK_RECURSIVE; -+ -+ return mutex_trylock(lock); -+} -+ -+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); -+ -+#endif -diff --git a/kernel/locking/mutex-rt.c b/kernel/locking/mutex-rt.c -new file mode 100644 -index 000000000000..4f81595c0f52 ---- /dev/null -+++ b/kernel/locking/mutex-rt.c -@@ -0,0 +1,223 @@ -+/* -+ * kernel/rt.c -+ * -+ * Real-Time Preemption Support -+ * -+ * started by Ingo Molnar: -+ * -+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar -+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner -+ * -+ * historic credit for proving that Linux spinlocks can be implemented via -+ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow -+ * and others) who prototyped it on 2.4 and did lots of comparative -+ * research and analysis; TimeSys, for proving that you can implement a -+ * fully preemptible kernel via the use of IRQ threading and mutexes; -+ * Bill Huey for persuasively arguing on lkml that the mutex model is the -+ * right one; and to MontaVista, who ported pmutexes to 2.6. -+ * -+ * This code is a from-scratch implementation and is not based on pmutexes, -+ * but the idea of converting spinlocks to mutexes is used here too. -+ * -+ * lock debugging, locking tree, deadlock detection: -+ * -+ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey -+ * Released under the General Public License (GPL). -+ * -+ * Includes portions of the generic R/W semaphore implementation from: -+ * -+ * Copyright (c) 2001 David Howells (dhowells@redhat.com). -+ * - Derived partially from idea by Andrea Arcangeli -+ * - Derived also from comments by Linus -+ * -+ * Pending ownership of locks and ownership stealing: -+ * -+ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt -+ * -+ * (also by Steven Rostedt) -+ * - Converted single pi_lock to individual task locks. -+ * -+ * By Esben Nielsen: -+ * Doing priority inheritance with help of the scheduler. -+ * -+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner -+ * - major rework based on Esben Nielsens initial patch -+ * - replaced thread_info references by task_struct refs -+ * - removed task->pending_owner dependency -+ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks -+ * in the scheduler return path as discussed with Steven Rostedt -+ * -+ * Copyright (C) 2006, Kihon Technologies Inc. -+ * Steven Rostedt -+ * - debugged and patched Thomas Gleixner's rework. -+ * - added back the cmpxchg to the rework. -+ * - turned atomic require back on for SMP. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rtmutex_common.h" -+ -+/* -+ * struct mutex functions -+ */ -+void __mutex_do_init(struct mutex *mutex, const char *name, -+ struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held lock: -+ */ -+ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); -+ lockdep_init_map(&mutex->dep_map, name, key, 0); -+#endif -+ mutex->lock.save_state = 0; -+} -+EXPORT_SYMBOL(__mutex_do_init); -+ -+void __lockfunc _mutex_lock(struct mutex *lock) -+{ -+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); -+} -+EXPORT_SYMBOL(_mutex_lock); -+ -+void __lockfunc _mutex_lock_io(struct mutex *lock) -+{ -+ int token; -+ -+ token = io_schedule_prepare(); -+ _mutex_lock(lock); -+ io_schedule_finish(token); -+} -+EXPORT_SYMBOL_GPL(_mutex_lock_io); -+ -+int __lockfunc _mutex_lock_interruptible(struct mutex *lock) -+{ -+ int ret; -+ -+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+ ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_interruptible); -+ -+int __lockfunc _mutex_lock_killable(struct mutex *lock) -+{ -+ int ret; -+ -+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+ ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_killable); -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) -+{ -+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); -+ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); -+} -+EXPORT_SYMBOL(_mutex_lock_nested); -+ -+void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass) -+{ -+ int token; -+ -+ token = io_schedule_prepare(); -+ -+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); -+ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); -+ -+ io_schedule_finish(token); -+} -+EXPORT_SYMBOL_GPL(_mutex_lock_io_nested); -+ -+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) -+{ -+ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); -+ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE); -+} -+EXPORT_SYMBOL(_mutex_lock_nest_lock); -+ -+int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) -+{ -+ int ret; -+ -+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); -+ ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_interruptible_nested); -+ -+int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) -+{ -+ int ret; -+ -+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -+ ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_killable_nested); -+#endif -+ -+int __lockfunc _mutex_trylock(struct mutex *lock) -+{ -+ int ret = __rt_mutex_trylock(&lock->lock); -+ -+ if (ret) -+ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_trylock); -+ -+void __lockfunc _mutex_unlock(struct mutex *lock) -+{ -+ 
mutex_release(&lock->dep_map, 1, _RET_IP_); -+ __rt_mutex_unlock(&lock->lock); -+} -+EXPORT_SYMBOL(_mutex_unlock); -+ -+/** -+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 -+ * @cnt: the atomic which we are to dec -+ * @lock: the mutex to return holding if we dec to 0 -+ * -+ * return true and hold lock if we dec to 0, return false otherwise -+ */ -+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) -+{ -+ /* dec if we can't possibly hit 0 */ -+ if (atomic_add_unless(cnt, -1, 1)) -+ return 0; -+ /* we might hit 0, so take the lock */ -+ mutex_lock(lock); -+ if (!atomic_dec_and_test(cnt)) { -+ /* when we actually did the dec, we didn't hit 0 */ -+ mutex_unlock(lock); -+ return 0; -+ } -+ /* we hit 0, and we hold the lock */ -+ return 1; -+} -+EXPORT_SYMBOL(atomic_dec_and_mutex_lock); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch b/kernel/patches-4.19.x-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch deleted file mode 100644 index 6bbf8cd7a..000000000 --- a/kernel/patches-4.19.x-rt/0145-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch +++ /dev/null @@ -1,426 +0,0 @@ -From b0c41a7abe90071ea550299c767852ba0d55a41f Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 12 Oct 2017 17:28:34 +0200 -Subject: [PATCH 145/328] rtmutex: add rwsem implementation based on rtmutex - -The RT specific R/W semaphore implementation restricts the number of readers -to one because a writer cannot block on multiple readers and inherit its -priority or budget. - -The single reader restricting is painful in various ways: - - - Performance bottleneck for multi-threaded applications in the page fault - path (mmap sem) - - - Progress blocker for drivers which are carefully crafted to avoid the - potential reader/writer deadlock in mainline. - -The analysis of the writer code pathes shows, that properly written RT tasks -should not take them. Syscalls like mmap(), file access which take mmap sem -write locked have unbound latencies which are completely unrelated to mmap -sem. Other R/W sem users like graphics drivers are not suitable for RT tasks -either. - -So there is little risk to hurt RT tasks when the RT rwsem implementation is -changed in the following way: - - - Allow concurrent readers - - - Make writers block until the last reader left the critical section. This - blocking is not subject to priority/budget inheritance. - - - Readers blocked on a writer inherit their priority/budget in the normal - way. - -There is a drawback with this scheme. R/W semaphores become writer unfair -though the applications which have triggered writer starvation (mostly on -mmap_sem) in the past are not really the typical workloads running on a RT -system. So while it's unlikely to hit writer starvation, it's possible. If -there are unexpected workloads on RT systems triggering it, we need to rethink -the approach. 
- -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/rwsem_rt.h | 68 +++++++++ - kernel/locking/rwsem-rt.c | 293 ++++++++++++++++++++++++++++++++++++++ - 2 files changed, 361 insertions(+) - create mode 100644 include/linux/rwsem_rt.h - create mode 100644 kernel/locking/rwsem-rt.c - -diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h -new file mode 100644 -index 000000000000..2018ff77904a ---- /dev/null -+++ b/include/linux/rwsem_rt.h -@@ -0,0 +1,68 @@ -+#ifndef _LINUX_RWSEM_RT_H -+#define _LINUX_RWSEM_RT_H -+ -+#ifndef _LINUX_RWSEM_H -+#error "Include rwsem.h" -+#endif -+ -+#include -+#include -+ -+#define READER_BIAS (1U << 31) -+#define WRITER_BIAS (1U << 30) -+ -+struct rw_semaphore { -+ atomic_t readers; -+ struct rt_mutex rtmutex; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+}; -+ -+#define __RWSEM_INITIALIZER(name) \ -+{ \ -+ .readers = ATOMIC_INIT(READER_BIAS), \ -+ .rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex), \ -+ RW_DEP_MAP_INIT(name) \ -+} -+ -+#define DECLARE_RWSEM(lockname) \ -+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) -+ -+extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name, -+ struct lock_class_key *key); -+ -+#define __init_rwsem(sem, name, key) \ -+do { \ -+ rt_mutex_init(&(sem)->rtmutex); \ -+ __rwsem_init((sem), (name), (key)); \ -+} while (0) -+ -+#define init_rwsem(sem) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ __init_rwsem((sem), #sem, &__key); \ -+} while (0) -+ -+static inline int rwsem_is_locked(struct rw_semaphore *sem) -+{ -+ return atomic_read(&sem->readers) != READER_BIAS; -+} -+ -+static inline int rwsem_is_contended(struct rw_semaphore *sem) -+{ -+ return atomic_read(&sem->readers) > 0; -+} -+ -+extern void __down_read(struct rw_semaphore *sem); -+extern int __down_read_killable(struct rw_semaphore *sem); -+extern int __down_read_trylock(struct rw_semaphore *sem); -+extern void __down_write(struct rw_semaphore *sem); -+extern int __must_check __down_write_killable(struct rw_semaphore *sem); -+extern int __down_write_trylock(struct rw_semaphore *sem); -+extern void __up_read(struct rw_semaphore *sem); -+extern void __up_write(struct rw_semaphore *sem); -+extern void __downgrade_write(struct rw_semaphore *sem); -+ -+#endif -diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c -new file mode 100644 -index 000000000000..7d3c5cf3d23d ---- /dev/null -+++ b/kernel/locking/rwsem-rt.c -@@ -0,0 +1,293 @@ -+/* -+ */ -+#include -+#include -+#include -+#include -+ -+#include "rtmutex_common.h" -+ -+/* -+ * RT-specific reader/writer semaphores -+ * -+ * down_write() -+ * 1) Lock sem->rtmutex -+ * 2) Remove the reader BIAS to force readers into the slow path -+ * 3) Wait until all readers have left the critical region -+ * 4) Mark it write locked -+ * -+ * up_write() -+ * 1) Remove the write locked marker -+ * 2) Set the reader BIAS so readers can use the fast path again -+ * 3) Unlock sem->rtmutex to release blocked readers -+ * -+ * down_read() -+ * 1) Try fast path acquisition (reader BIAS is set) -+ * 2) Take sem->rtmutex.wait_lock which protects the writelocked flag -+ * 3) If !writelocked, acquire it for read -+ * 4) If writelocked, block on sem->rtmutex -+ * 5) unlock sem->rtmutex, goto 1) -+ * -+ * up_read() -+ * 1) Try fast path release (reader count != 1) -+ * 2) Wake the writer waiting in down_write()#3 -+ * -+ * down_read()#3 has the consequence, that rw semaphores on RT are not writer -+ * fair, but 
writers, which should be avoided in RT tasks (think mmap_sem), -+ * are subject to the rtmutex priority/DL inheritance mechanism. -+ * -+ * It's possible to make the rw semaphores writer fair by keeping a list of -+ * active readers. A blocked writer would force all newly incoming readers to -+ * block on the rtmutex, but the rtmutex would have to be proxy locked for one -+ * reader after the other. We can't use multi-reader inheritance because there -+ * is no way to support that with SCHED_DEADLINE. Implementing the one by one -+ * reader boosting/handover mechanism is a major surgery for a very dubious -+ * value. -+ * -+ * The risk of writer starvation is there, but the pathological use cases -+ * which trigger it are not necessarily the typical RT workloads. -+ */ -+ -+void __rwsem_init(struct rw_semaphore *sem, const char *name, -+ struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held semaphore: -+ */ -+ debug_check_no_locks_freed((void *)sem, sizeof(*sem)); -+ lockdep_init_map(&sem->dep_map, name, key, 0); -+#endif -+ atomic_set(&sem->readers, READER_BIAS); -+} -+EXPORT_SYMBOL(__rwsem_init); -+ -+int __down_read_trylock(struct rw_semaphore *sem) -+{ -+ int r, old; -+ -+ /* -+ * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is -+ * set. -+ */ -+ for (r = atomic_read(&sem->readers); r < 0;) { -+ old = atomic_cmpxchg(&sem->readers, r, r + 1); -+ if (likely(old == r)) -+ return 1; -+ r = old; -+ } -+ return 0; -+} -+ -+static int __sched __down_read_common(struct rw_semaphore *sem, int state) -+{ -+ struct rt_mutex *m = &sem->rtmutex; -+ struct rt_mutex_waiter waiter; -+ int ret; -+ -+ if (__down_read_trylock(sem)) -+ return 0; -+ -+ might_sleep(); -+ raw_spin_lock_irq(&m->wait_lock); -+ /* -+ * Allow readers as long as the writer has not completely -+ * acquired the semaphore for write. -+ */ -+ if (atomic_read(&sem->readers) != WRITER_BIAS) { -+ atomic_inc(&sem->readers); -+ raw_spin_unlock_irq(&m->wait_lock); -+ return 0; -+ } -+ -+ /* -+ * Call into the slow lock path with the rtmutex->wait_lock -+ * held, so this can't result in the following race: -+ * -+ * Reader1 Reader2 Writer -+ * down_read() -+ * down_write() -+ * rtmutex_lock(m) -+ * swait() -+ * down_read() -+ * unlock(m->wait_lock) -+ * up_read() -+ * swake() -+ * lock(m->wait_lock) -+ * sem->writelocked=true -+ * unlock(m->wait_lock) -+ * -+ * up_write() -+ * sem->writelocked=false -+ * rtmutex_unlock(m) -+ * down_read() -+ * down_write() -+ * rtmutex_lock(m) -+ * swait() -+ * rtmutex_lock(m) -+ * -+ * That would put Reader1 behind the writer waiting on -+ * Reader2 to call up_read() which might be unbound. -+ */ -+ rt_mutex_init_waiter(&waiter, false); -+ ret = rt_mutex_slowlock_locked(m, state, NULL, RT_MUTEX_MIN_CHAINWALK, -+ &waiter); -+ /* -+ * The slowlock() above is guaranteed to return with the rtmutex (for -+ * ret = 0) is now held, so there can't be a writer active. Increment -+ * the reader count and immediately drop the rtmutex again. -+ * For ret != 0 we don't hold the rtmutex and need unlock the wait_lock. -+ * We don't own the lock then. 
-+ */ -+ if (!ret) -+ atomic_inc(&sem->readers); -+ raw_spin_unlock_irq(&m->wait_lock); -+ if (!ret) -+ __rt_mutex_unlock(m); -+ -+ debug_rt_mutex_free_waiter(&waiter); -+ return ret; -+} -+ -+void __down_read(struct rw_semaphore *sem) -+{ -+ int ret; -+ -+ ret = __down_read_common(sem, TASK_UNINTERRUPTIBLE); -+ WARN_ON_ONCE(ret); -+} -+ -+int __down_read_killable(struct rw_semaphore *sem) -+{ -+ int ret; -+ -+ ret = __down_read_common(sem, TASK_KILLABLE); -+ if (likely(!ret)) -+ return ret; -+ WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret); -+ return -EINTR; -+} -+ -+void __up_read(struct rw_semaphore *sem) -+{ -+ struct rt_mutex *m = &sem->rtmutex; -+ struct task_struct *tsk; -+ -+ /* -+ * sem->readers can only hit 0 when a writer is waiting for the -+ * active readers to leave the critical region. -+ */ -+ if (!atomic_dec_and_test(&sem->readers)) -+ return; -+ -+ might_sleep(); -+ raw_spin_lock_irq(&m->wait_lock); -+ /* -+ * Wake the writer, i.e. the rtmutex owner. It might release the -+ * rtmutex concurrently in the fast path (due to a signal), but to -+ * clean up the rwsem it needs to acquire m->wait_lock. The worst -+ * case which can happen is a spurious wakeup. -+ */ -+ tsk = rt_mutex_owner(m); -+ if (tsk) -+ wake_up_process(tsk); -+ -+ raw_spin_unlock_irq(&m->wait_lock); -+} -+ -+static void __up_write_unlock(struct rw_semaphore *sem, int bias, -+ unsigned long flags) -+{ -+ struct rt_mutex *m = &sem->rtmutex; -+ -+ atomic_add(READER_BIAS - bias, &sem->readers); -+ raw_spin_unlock_irqrestore(&m->wait_lock, flags); -+ __rt_mutex_unlock(m); -+} -+ -+static int __sched __down_write_common(struct rw_semaphore *sem, int state) -+{ -+ struct rt_mutex *m = &sem->rtmutex; -+ unsigned long flags; -+ -+ /* Take the rtmutex as a first step */ -+ if (__rt_mutex_lock_state(m, state)) -+ return -EINTR; -+ -+ /* Force readers into slow path */ -+ atomic_sub(READER_BIAS, &sem->readers); -+ might_sleep(); -+ -+ set_current_state(state); -+ for (;;) { -+ raw_spin_lock_irqsave(&m->wait_lock, flags); -+ /* Have all readers left the critical region? 
*/ -+ if (!atomic_read(&sem->readers)) { -+ atomic_set(&sem->readers, WRITER_BIAS); -+ __set_current_state(TASK_RUNNING); -+ raw_spin_unlock_irqrestore(&m->wait_lock, flags); -+ return 0; -+ } -+ -+ if (signal_pending_state(state, current)) { -+ __set_current_state(TASK_RUNNING); -+ __up_write_unlock(sem, 0, flags); -+ return -EINTR; -+ } -+ raw_spin_unlock_irqrestore(&m->wait_lock, flags); -+ -+ if (atomic_read(&sem->readers) != 0) { -+ schedule(); -+ set_current_state(state); -+ } -+ } -+} -+ -+void __sched __down_write(struct rw_semaphore *sem) -+{ -+ __down_write_common(sem, TASK_UNINTERRUPTIBLE); -+} -+ -+int __sched __down_write_killable(struct rw_semaphore *sem) -+{ -+ return __down_write_common(sem, TASK_KILLABLE); -+} -+ -+int __down_write_trylock(struct rw_semaphore *sem) -+{ -+ struct rt_mutex *m = &sem->rtmutex; -+ unsigned long flags; -+ -+ if (!__rt_mutex_trylock(m)) -+ return 0; -+ -+ atomic_sub(READER_BIAS, &sem->readers); -+ -+ raw_spin_lock_irqsave(&m->wait_lock, flags); -+ if (!atomic_read(&sem->readers)) { -+ atomic_set(&sem->readers, WRITER_BIAS); -+ raw_spin_unlock_irqrestore(&m->wait_lock, flags); -+ return 1; -+ } -+ __up_write_unlock(sem, 0, flags); -+ return 0; -+} -+ -+void __up_write(struct rw_semaphore *sem) -+{ -+ struct rt_mutex *m = &sem->rtmutex; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&m->wait_lock, flags); -+ __up_write_unlock(sem, WRITER_BIAS, flags); -+} -+ -+void __downgrade_write(struct rw_semaphore *sem) -+{ -+ struct rt_mutex *m = &sem->rtmutex; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&m->wait_lock, flags); -+ /* Release it and account current as reader */ -+ __up_write_unlock(sem, WRITER_BIAS - 1, flags); -+} --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch b/kernel/patches-4.19.x-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch deleted file mode 100644 index e3f55f58c..000000000 --- a/kernel/patches-4.19.x-rt/0146-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch +++ /dev/null @@ -1,581 +0,0 @@ -From c4d9a03dadd2e41e7670cbd53f91f7c08e3a3ce6 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 12 Oct 2017 17:18:06 +0200 -Subject: [PATCH 146/328] rtmutex: add rwlock implementation based on rtmutex - -The implementation is bias-based, similar to the rwsem implementation. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/rwlock_rt.h | 119 +++++++++++ - include/linux/rwlock_types_rt.h | 55 +++++ - kernel/locking/rwlock-rt.c | 368 ++++++++++++++++++++++++++++++++ - 3 files changed, 542 insertions(+) - create mode 100644 include/linux/rwlock_rt.h - create mode 100644 include/linux/rwlock_types_rt.h - create mode 100644 kernel/locking/rwlock-rt.c - -diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h -new file mode 100644 -index 000000000000..a9c4c2ac4d1f ---- /dev/null -+++ b/include/linux/rwlock_rt.h -@@ -0,0 +1,119 @@ -+#ifndef __LINUX_RWLOCK_RT_H -+#define __LINUX_RWLOCK_RT_H -+ -+#ifndef __LINUX_SPINLOCK_H -+#error Do not include directly. 
Use spinlock.h -+#endif -+ -+extern void __lockfunc rt_write_lock(rwlock_t *rwlock); -+extern void __lockfunc rt_read_lock(rwlock_t *rwlock); -+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); -+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); -+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); -+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); -+extern int __lockfunc rt_read_can_lock(rwlock_t *rwlock); -+extern int __lockfunc rt_write_can_lock(rwlock_t *rwlock); -+extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); -+ -+#define read_can_lock(rwlock) rt_read_can_lock(rwlock) -+#define write_can_lock(rwlock) rt_write_can_lock(rwlock) -+ -+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) -+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) -+ -+static inline int __write_trylock_rt_irqsave(rwlock_t *lock, unsigned long *flags) -+{ -+ /* XXX ARCH_IRQ_ENABLED */ -+ *flags = 0; -+ return rt_write_trylock(lock); -+} -+ -+#define write_trylock_irqsave(lock, flags) \ -+ __cond_lock(lock, __write_trylock_rt_irqsave(lock, &(flags))) -+ -+#define read_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ rt_read_lock(lock); \ -+ flags = 0; \ -+ } while (0) -+ -+#define write_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ rt_write_lock(lock); \ -+ flags = 0; \ -+ } while (0) -+ -+#define read_lock(lock) rt_read_lock(lock) -+ -+#define read_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_read_lock(lock); \ -+ } while (0) -+ -+#define read_lock_irq(lock) read_lock(lock) -+ -+#define write_lock(lock) rt_write_lock(lock) -+ -+#define write_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_write_lock(lock); \ -+ } while (0) -+ -+#define write_lock_irq(lock) write_lock(lock) -+ -+#define read_unlock(lock) rt_read_unlock(lock) -+ -+#define read_unlock_bh(lock) \ -+ do { \ -+ rt_read_unlock(lock); \ -+ local_bh_enable(); \ -+ } while (0) -+ -+#define read_unlock_irq(lock) read_unlock(lock) -+ -+#define write_unlock(lock) rt_write_unlock(lock) -+ -+#define write_unlock_bh(lock) \ -+ do { \ -+ rt_write_unlock(lock); \ -+ local_bh_enable(); \ -+ } while (0) -+ -+#define write_unlock_irq(lock) write_unlock(lock) -+ -+#define read_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ rt_read_unlock(lock); \ -+ } while (0) -+ -+#define write_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ rt_write_unlock(lock); \ -+ } while (0) -+ -+#define rwlock_init(rwl) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ __rt_rwlock_init(rwl, #rwl, &__key); \ -+} while (0) -+ -+/* -+ * Internal functions made global for CPU pinning -+ */ -+void __read_rt_lock(struct rt_rw_lock *lock); -+int __read_rt_trylock(struct rt_rw_lock *lock); -+void __write_rt_lock(struct rt_rw_lock *lock); -+int __write_rt_trylock(struct rt_rw_lock *lock); -+void __read_rt_unlock(struct rt_rw_lock *lock); -+void __write_rt_unlock(struct rt_rw_lock *lock); -+ -+#endif -diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h -new file mode 100644 -index 000000000000..546a1f8f1274 ---- /dev/null -+++ b/include/linux/rwlock_types_rt.h -@@ -0,0 +1,55 @@ -+#ifndef __LINUX_RWLOCK_TYPES_RT_H -+#define __LINUX_RWLOCK_TYPES_RT_H -+ -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. 
Include spinlock_types.h instead" -+#endif -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -+#else -+# define RW_DEP_MAP_INIT(lockname) -+#endif -+ -+typedef struct rt_rw_lock rwlock_t; -+ -+#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name) -+ -+#define DEFINE_RWLOCK(name) \ -+ rwlock_t name = __RW_LOCK_UNLOCKED(name) -+ -+/* -+ * A reader biased implementation primarily for CPU pinning. -+ * -+ * Can be selected as general replacement for the single reader RT rwlock -+ * variant -+ */ -+struct rt_rw_lock { -+ struct rt_mutex rtmutex; -+ atomic_t readers; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+}; -+ -+#define READER_BIAS (1U << 31) -+#define WRITER_BIAS (1U << 30) -+ -+#define __RWLOCK_RT_INITIALIZER(name) \ -+{ \ -+ .readers = ATOMIC_INIT(READER_BIAS), \ -+ .rtmutex = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.rtmutex), \ -+ RW_DEP_MAP_INIT(name) \ -+} -+ -+void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name, -+ struct lock_class_key *key); -+ -+#define rwlock_biased_rt_init(rwlock) \ -+ do { \ -+ static struct lock_class_key __key; \ -+ \ -+ __rwlock_biased_rt_init((rwlock), #rwlock, &__key); \ -+ } while (0) -+ -+#endif -diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c -new file mode 100644 -index 000000000000..aebb7ce25bc6 ---- /dev/null -+++ b/kernel/locking/rwlock-rt.c -@@ -0,0 +1,368 @@ -+/* -+ */ -+#include -+#include -+ -+#include "rtmutex_common.h" -+#include -+ -+/* -+ * RT-specific reader/writer locks -+ * -+ * write_lock() -+ * 1) Lock lock->rtmutex -+ * 2) Remove the reader BIAS to force readers into the slow path -+ * 3) Wait until all readers have left the critical region -+ * 4) Mark it write locked -+ * -+ * write_unlock() -+ * 1) Remove the write locked marker -+ * 2) Set the reader BIAS so readers can use the fast path again -+ * 3) Unlock lock->rtmutex to release blocked readers -+ * -+ * read_lock() -+ * 1) Try fast path acquisition (reader BIAS is set) -+ * 2) Take lock->rtmutex.wait_lock which protects the writelocked flag -+ * 3) If !writelocked, acquire it for read -+ * 4) If writelocked, block on lock->rtmutex -+ * 5) unlock lock->rtmutex, goto 1) -+ * -+ * read_unlock() -+ * 1) Try fast path release (reader count != 1) -+ * 2) Wake the writer waiting in write_lock()#3 -+ * -+ * read_lock()#3 has the consequence, that rw locks on RT are not writer -+ * fair, but writers, which should be avoided in RT tasks (think tasklist -+ * lock), are subject to the rtmutex priority/DL inheritance mechanism. -+ * -+ * It's possible to make the rw locks writer fair by keeping a list of -+ * active readers. A blocked writer would force all newly incoming readers -+ * to block on the rtmutex, but the rtmutex would have to be proxy locked -+ * for one reader after the other. We can't use multi-reader inheritance -+ * because there is no way to support that with -+ * SCHED_DEADLINE. Implementing the one by one reader boosting/handover -+ * mechanism is a major surgery for a very dubious value. -+ * -+ * The risk of writer starvation is there, but the pathological use cases -+ * which trigger it are not necessarily the typical RT workloads. 
-+ */ -+ -+void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name, -+ struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held semaphore: -+ */ -+ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); -+ lockdep_init_map(&lock->dep_map, name, key, 0); -+#endif -+ atomic_set(&lock->readers, READER_BIAS); -+ rt_mutex_init(&lock->rtmutex); -+ lock->rtmutex.save_state = 1; -+} -+ -+int __read_rt_trylock(struct rt_rw_lock *lock) -+{ -+ int r, old; -+ -+ /* -+ * Increment reader count, if lock->readers < 0, i.e. READER_BIAS is -+ * set. -+ */ -+ for (r = atomic_read(&lock->readers); r < 0;) { -+ old = atomic_cmpxchg(&lock->readers, r, r + 1); -+ if (likely(old == r)) -+ return 1; -+ r = old; -+ } -+ return 0; -+} -+ -+void __sched __read_rt_lock(struct rt_rw_lock *lock) -+{ -+ struct rt_mutex *m = &lock->rtmutex; -+ struct rt_mutex_waiter waiter; -+ unsigned long flags; -+ -+ if (__read_rt_trylock(lock)) -+ return; -+ -+ raw_spin_lock_irqsave(&m->wait_lock, flags); -+ /* -+ * Allow readers as long as the writer has not completely -+ * acquired the semaphore for write. -+ */ -+ if (atomic_read(&lock->readers) != WRITER_BIAS) { -+ atomic_inc(&lock->readers); -+ raw_spin_unlock_irqrestore(&m->wait_lock, flags); -+ return; -+ } -+ -+ /* -+ * Call into the slow lock path with the rtmutex->wait_lock -+ * held, so this can't result in the following race: -+ * -+ * Reader1 Reader2 Writer -+ * read_lock() -+ * write_lock() -+ * rtmutex_lock(m) -+ * swait() -+ * read_lock() -+ * unlock(m->wait_lock) -+ * read_unlock() -+ * swake() -+ * lock(m->wait_lock) -+ * lock->writelocked=true -+ * unlock(m->wait_lock) -+ * -+ * write_unlock() -+ * lock->writelocked=false -+ * rtmutex_unlock(m) -+ * read_lock() -+ * write_lock() -+ * rtmutex_lock(m) -+ * swait() -+ * rtmutex_lock(m) -+ * -+ * That would put Reader1 behind the writer waiting on -+ * Reader2 to call read_unlock() which might be unbound. -+ */ -+ rt_mutex_init_waiter(&waiter, false); -+ rt_spin_lock_slowlock_locked(m, &waiter, flags); -+ /* -+ * The slowlock() above is guaranteed to return with the rtmutex is -+ * now held, so there can't be a writer active. Increment the reader -+ * count and immediately drop the rtmutex again. -+ */ -+ atomic_inc(&lock->readers); -+ raw_spin_unlock_irqrestore(&m->wait_lock, flags); -+ rt_spin_lock_slowunlock(m); -+ -+ debug_rt_mutex_free_waiter(&waiter); -+} -+ -+void __read_rt_unlock(struct rt_rw_lock *lock) -+{ -+ struct rt_mutex *m = &lock->rtmutex; -+ struct task_struct *tsk; -+ -+ /* -+ * sem->readers can only hit 0 when a writer is waiting for the -+ * active readers to leave the critical region. -+ */ -+ if (!atomic_dec_and_test(&lock->readers)) -+ return; -+ -+ raw_spin_lock_irq(&m->wait_lock); -+ /* -+ * Wake the writer, i.e. the rtmutex owner. It might release the -+ * rtmutex concurrently in the fast path, but to clean up the rw -+ * lock it needs to acquire m->wait_lock. The worst case which can -+ * happen is a spurious wakeup. 
-+ */ -+ tsk = rt_mutex_owner(m); -+ if (tsk) -+ wake_up_process(tsk); -+ -+ raw_spin_unlock_irq(&m->wait_lock); -+} -+ -+static void __write_unlock_common(struct rt_rw_lock *lock, int bias, -+ unsigned long flags) -+{ -+ struct rt_mutex *m = &lock->rtmutex; -+ -+ atomic_add(READER_BIAS - bias, &lock->readers); -+ raw_spin_unlock_irqrestore(&m->wait_lock, flags); -+ rt_spin_lock_slowunlock(m); -+} -+ -+void __sched __write_rt_lock(struct rt_rw_lock *lock) -+{ -+ struct rt_mutex *m = &lock->rtmutex; -+ struct task_struct *self = current; -+ unsigned long flags; -+ -+ /* Take the rtmutex as a first step */ -+ __rt_spin_lock(m); -+ -+ /* Force readers into slow path */ -+ atomic_sub(READER_BIAS, &lock->readers); -+ -+ raw_spin_lock_irqsave(&m->wait_lock, flags); -+ -+ raw_spin_lock(&self->pi_lock); -+ self->saved_state = self->state; -+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); -+ raw_spin_unlock(&self->pi_lock); -+ -+ for (;;) { -+ /* Have all readers left the critical region? */ -+ if (!atomic_read(&lock->readers)) { -+ atomic_set(&lock->readers, WRITER_BIAS); -+ raw_spin_lock(&self->pi_lock); -+ __set_current_state_no_track(self->saved_state); -+ self->saved_state = TASK_RUNNING; -+ raw_spin_unlock(&self->pi_lock); -+ raw_spin_unlock_irqrestore(&m->wait_lock, flags); -+ return; -+ } -+ -+ raw_spin_unlock_irqrestore(&m->wait_lock, flags); -+ -+ if (atomic_read(&lock->readers) != 0) -+ schedule(); -+ -+ raw_spin_lock_irqsave(&m->wait_lock, flags); -+ -+ raw_spin_lock(&self->pi_lock); -+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); -+ raw_spin_unlock(&self->pi_lock); -+ } -+} -+ -+int __write_rt_trylock(struct rt_rw_lock *lock) -+{ -+ struct rt_mutex *m = &lock->rtmutex; -+ unsigned long flags; -+ -+ if (!__rt_mutex_trylock(m)) -+ return 0; -+ -+ atomic_sub(READER_BIAS, &lock->readers); -+ -+ raw_spin_lock_irqsave(&m->wait_lock, flags); -+ if (!atomic_read(&lock->readers)) { -+ atomic_set(&lock->readers, WRITER_BIAS); -+ raw_spin_unlock_irqrestore(&m->wait_lock, flags); -+ return 1; -+ } -+ __write_unlock_common(lock, 0, flags); -+ return 0; -+} -+ -+void __write_rt_unlock(struct rt_rw_lock *lock) -+{ -+ struct rt_mutex *m = &lock->rtmutex; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&m->wait_lock, flags); -+ __write_unlock_common(lock, WRITER_BIAS, flags); -+} -+ -+/* Map the reader biased implementation */ -+static inline int do_read_rt_trylock(rwlock_t *rwlock) -+{ -+ return __read_rt_trylock(rwlock); -+} -+ -+static inline int do_write_rt_trylock(rwlock_t *rwlock) -+{ -+ return __write_rt_trylock(rwlock); -+} -+ -+static inline void do_read_rt_lock(rwlock_t *rwlock) -+{ -+ __read_rt_lock(rwlock); -+} -+ -+static inline void do_write_rt_lock(rwlock_t *rwlock) -+{ -+ __write_rt_lock(rwlock); -+} -+ -+static inline void do_read_rt_unlock(rwlock_t *rwlock) -+{ -+ __read_rt_unlock(rwlock); -+} -+ -+static inline void do_write_rt_unlock(rwlock_t *rwlock) -+{ -+ __write_rt_unlock(rwlock); -+} -+ -+static inline void do_rwlock_rt_init(rwlock_t *rwlock, const char *name, -+ struct lock_class_key *key) -+{ -+ __rwlock_biased_rt_init(rwlock, name, key); -+} -+ -+int __lockfunc rt_read_can_lock(rwlock_t *rwlock) -+{ -+ return atomic_read(&rwlock->readers) < 0; -+} -+ -+int __lockfunc rt_write_can_lock(rwlock_t *rwlock) -+{ -+ return atomic_read(&rwlock->readers) == READER_BIAS; -+} -+ -+/* -+ * The common functions which get wrapped into the rwlock API. 
-+ */ -+int __lockfunc rt_read_trylock(rwlock_t *rwlock) -+{ -+ int ret; -+ -+ migrate_disable(); -+ ret = do_read_rt_trylock(rwlock); -+ if (ret) -+ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); -+ else -+ migrate_enable(); -+ return ret; -+} -+EXPORT_SYMBOL(rt_read_trylock); -+ -+int __lockfunc rt_write_trylock(rwlock_t *rwlock) -+{ -+ int ret; -+ -+ migrate_disable(); -+ ret = do_write_rt_trylock(rwlock); -+ if (ret) -+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); -+ else -+ migrate_enable(); -+ return ret; -+} -+EXPORT_SYMBOL(rt_write_trylock); -+ -+void __lockfunc rt_read_lock(rwlock_t *rwlock) -+{ -+ migrate_disable(); -+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); -+ do_read_rt_lock(rwlock); -+} -+EXPORT_SYMBOL(rt_read_lock); -+ -+void __lockfunc rt_write_lock(rwlock_t *rwlock) -+{ -+ migrate_disable(); -+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); -+ do_write_rt_lock(rwlock); -+} -+EXPORT_SYMBOL(rt_write_lock); -+ -+void __lockfunc rt_read_unlock(rwlock_t *rwlock) -+{ -+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); -+ do_read_rt_unlock(rwlock); -+ migrate_enable(); -+} -+EXPORT_SYMBOL(rt_read_unlock); -+ -+void __lockfunc rt_write_unlock(rwlock_t *rwlock) -+{ -+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); -+ do_write_rt_unlock(rwlock); -+ migrate_enable(); -+} -+EXPORT_SYMBOL(rt_write_unlock); -+ -+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) -+{ -+ do_rwlock_rt_init(rwlock, name, key); -+} -+EXPORT_SYMBOL(__rt_rwlock_init); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch b/kernel/patches-4.19.x-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch deleted file mode 100644 index f6afe52b7..000000000 --- a/kernel/patches-4.19.x-rt/0147-rtmutex-rwlock-preserve-state-like-a-sleeping-lock.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 997abbcc33dcad79e5670265b72051ddc3cfdce7 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 11 Jan 2019 21:16:31 +0100 -Subject: [PATCH 147/328] rtmutex/rwlock: preserve state like a sleeping lock - -The rwlock is spinning while acquiring a lock. Therefore it must become -a sleeping lock on RT and preserve its task state while sleeping and -waiting for the lock to become available. - -Reported-by: Joe Korty -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rwlock-rt.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c -index aebb7ce25bc6..8f90afe111ce 100644 ---- a/kernel/locking/rwlock-rt.c -+++ b/kernel/locking/rwlock-rt.c -@@ -128,7 +128,7 @@ void __sched __read_rt_lock(struct rt_rw_lock *lock) - * That would put Reader1 behind the writer waiting on - * Reader2 to call read_unlock() which might be unbound. 
- */ -- rt_mutex_init_waiter(&waiter, false); -+ rt_mutex_init_waiter(&waiter, true); - rt_spin_lock_slowlock_locked(m, &waiter, flags); - /* - * The slowlock() above is guaranteed to return with the rtmutex is --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0148-rtmutex-wire-up-RT-s-locking.patch b/kernel/patches-4.19.x-rt/0148-rtmutex-wire-up-RT-s-locking.patch deleted file mode 100644 index 38bea50eb..000000000 --- a/kernel/patches-4.19.x-rt/0148-rtmutex-wire-up-RT-s-locking.patch +++ /dev/null @@ -1,270 +0,0 @@ -From c55bc0df82ccf91b888e73d888a1afb8c8cbc581 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 12 Oct 2017 17:31:14 +0200 -Subject: [PATCH 148/328] rtmutex: wire up RT's locking - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/mutex.h | 20 +++++++++++++------- - include/linux/rwsem.h | 11 +++++++++++ - include/linux/spinlock.h | 12 +++++++++++- - include/linux/spinlock_api_smp.h | 4 +++- - include/linux/spinlock_types.h | 11 ++++++++--- - kernel/locking/Makefile | 9 ++++++++- - kernel/locking/spinlock.c | 7 +++++++ - kernel/locking/spinlock_debug.c | 5 +++++ - 8 files changed, 66 insertions(+), 13 deletions(-) - -diff --git a/include/linux/mutex.h b/include/linux/mutex.h -index 3093dd162424..cad906f54d0a 100644 ---- a/include/linux/mutex.h -+++ b/include/linux/mutex.h -@@ -22,6 +22,17 @@ - - struct ww_acquire_ctx; - -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ -+ , .dep_map = { .name = #lockname } -+#else -+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -+#endif -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else -+ - /* - * Simple, straightforward mutexes with strict semantics: - * -@@ -118,13 +129,6 @@ do { \ - __mutex_init((mutex), #mutex, &__key); \ - } while (0) - --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ -- , .dep_map = { .name = #lockname } --#else --# define __DEP_MAP_MUTEX_INITIALIZER(lockname) --#endif -- - #define __MUTEX_INITIALIZER(lockname) \ - { .owner = ATOMIC_LONG_INIT(0) \ - , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ -@@ -229,4 +233,6 @@ mutex_trylock_recursive(struct mutex *lock) - return mutex_trylock(lock); - } - -+#endif /* !PREEMPT_RT_FULL */ -+ - #endif /* __LINUX_MUTEX_H */ -diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h -index ab93b6eae696..b1e32373f44f 100644 ---- a/include/linux/rwsem.h -+++ b/include/linux/rwsem.h -@@ -20,6 +20,10 @@ - #include - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+#include -+#else /* PREEMPT_RT_FULL */ -+ - struct rw_semaphore; - - #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -@@ -114,6 +118,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) - return !list_empty(&sem->wait_list); - } - -+#endif /* !PREEMPT_RT_FULL */ -+ -+/* -+ * The functions below are the same for all rwsem implementations including -+ * the RT specific variant. 
-+ */ -+ - /* - * lock for reading - */ -diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h -index e089157dcf97..5f5ad0630a26 100644 ---- a/include/linux/spinlock.h -+++ b/include/linux/spinlock.h -@@ -298,7 +298,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) - }) - - /* Include rwlock functions */ --#include -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else -+# include -+#endif - - /* - * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: -@@ -309,6 +313,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) - # include - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else /* PREEMPT_RT_FULL */ -+ - /* - * Map the spin_lock functions to the raw variants for PREEMPT_RT=n - */ -@@ -429,6 +437,8 @@ static __always_inline int spin_is_contended(spinlock_t *lock) - - #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) - -+#endif /* !PREEMPT_RT_FULL */ -+ - /* - * Pull the atomic_t declaration: - * (asm-mips/atomic.h needs above definitions) -diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h -index 42dfab89e740..29d99ae5a8ab 100644 ---- a/include/linux/spinlock_api_smp.h -+++ b/include/linux/spinlock_api_smp.h -@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) - return 0; - } - --#include -+#ifndef CONFIG_PREEMPT_RT_FULL -+# include -+#endif - - #endif /* __LINUX_SPINLOCK_API_SMP_H */ -diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h -index 5c8664d57fb8..10bac715ea96 100644 ---- a/include/linux/spinlock_types.h -+++ b/include/linux/spinlock_types.h -@@ -11,8 +11,13 @@ - - #include - --#include -- --#include -+#ifndef CONFIG_PREEMPT_RT_FULL -+# include -+# include -+#else -+# include -+# include -+# include -+#endif - - #endif /* __LINUX_SPINLOCK_TYPES_H */ -diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile -index 392c7f23af76..c0bf04b6b965 100644 ---- a/kernel/locking/Makefile -+++ b/kernel/locking/Makefile -@@ -3,7 +3,7 @@ - # and is generally not a function of system call inputs. 
- KCOV_INSTRUMENT := n - --obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o -+obj-y += semaphore.o percpu-rwsem.o - - ifdef CONFIG_FUNCTION_TRACER - CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) -@@ -12,7 +12,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE) - CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE) - endif - -+ifneq ($(CONFIG_PREEMPT_RT_FULL),y) -+obj-y += mutex.o - obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o -+endif -+obj-y += rwsem.o - obj-$(CONFIG_LOCKDEP) += lockdep.o - ifeq ($(CONFIG_PROC_FS),y) - obj-$(CONFIG_LOCKDEP) += lockdep_proc.o -@@ -25,8 +29,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o - obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o - obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o - obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o -+ifneq ($(CONFIG_PREEMPT_RT_FULL),y) - obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o - obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o -+endif -+obj-$(CONFIG_PREEMPT_RT_FULL) += mutex-rt.o rwsem-rt.o rwlock-rt.o - obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o - obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o - obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o -diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c -index 936f3d14dd6b..e89b70f474af 100644 ---- a/kernel/locking/spinlock.c -+++ b/kernel/locking/spinlock.c -@@ -117,8 +117,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ - * __[spin|read|write]_lock_bh() - */ - BUILD_LOCK_OPS(spin, raw_spinlock); -+ -+#ifndef CONFIG_PREEMPT_RT_FULL - BUILD_LOCK_OPS(read, rwlock); - BUILD_LOCK_OPS(write, rwlock); -+#endif - - #endif - -@@ -202,6 +205,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) - EXPORT_SYMBOL(_raw_spin_unlock_bh); - #endif - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #ifndef CONFIG_INLINE_READ_TRYLOCK - int __lockfunc _raw_read_trylock(rwlock_t *lock) - { -@@ -346,6 +351,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) - EXPORT_SYMBOL(_raw_write_unlock_bh); - #endif - -+#endif /* !PREEMPT_RT_FULL */ -+ - #ifdef CONFIG_DEBUG_LOCK_ALLOC - - void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) -diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c -index 03595c29c566..d63df281b495 100644 ---- a/kernel/locking/spinlock_debug.c -+++ b/kernel/locking/spinlock_debug.c -@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, - - EXPORT_SYMBOL(__raw_spin_lock_init); - -+#ifndef CONFIG_PREEMPT_RT_FULL - void __rwlock_init(rwlock_t *lock, const char *name, - struct lock_class_key *key) - { -@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, - } - - EXPORT_SYMBOL(__rwlock_init); -+#endif - - static void spin_dump(raw_spinlock_t *lock, const char *msg) - { -@@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock) - arch_spin_unlock(&lock->raw_lock); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - static void rwlock_bug(rwlock_t *lock, const char *msg) - { - if (!debug_locks_off()) -@@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t *lock) - debug_write_unlock(lock); - arch_write_unlock(&lock->raw_lock); - } -+ -+#endif --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/kernel/patches-4.19.x-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch deleted file mode 100644 index 95fe7b208..000000000 --- a/kernel/patches-4.19.x-rt/0149-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch +++ /dev/null @@ -1,444 +0,0 @@ -From 01d5bbd35ae2a2b3350c42e526b0199fb3836e48 Mon Sep 17 
00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 12 Oct 2017 17:34:38 +0200 -Subject: [PATCH 149/328] rtmutex: add ww_mutex addon for mutex-rt - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rtmutex.c | 271 ++++++++++++++++++++++++++++++-- - kernel/locking/rtmutex_common.h | 2 + - kernel/locking/rwsem-rt.c | 2 +- - 3 files changed, 261 insertions(+), 14 deletions(-) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 88df1ff7ca2d..1f2dc2dfe2e7 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -23,6 +23,7 @@ - #include - #include - #include -+#include - - #include "rtmutex_common.h" - -@@ -1245,6 +1246,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init); - - #endif /* PREEMPT_RT_FULL */ - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ static inline int __sched -+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) -+{ -+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); -+ struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); -+ -+ if (!hold_ctx) -+ return 0; -+ -+ if (unlikely(ctx == hold_ctx)) -+ return -EALREADY; -+ -+ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && -+ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { -+#ifdef CONFIG_DEBUG_MUTEXES -+ DEBUG_LOCKS_WARN_ON(ctx->contending_lock); -+ ctx->contending_lock = ww; -+#endif -+ return -EDEADLK; -+ } -+ -+ return 0; -+} -+#else -+ static inline int __sched -+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) -+{ -+ BUG(); -+ return 0; -+} -+ -+#endif -+ - static inline int - try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter) -@@ -1523,7 +1558,8 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) - static int __sched - __rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- struct rt_mutex_waiter *waiter) -+ struct rt_mutex_waiter *waiter, -+ struct ww_acquire_ctx *ww_ctx) - { - int ret = 0; - -@@ -1541,6 +1577,12 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, - break; - } - -+ if (ww_ctx && ww_ctx->acquired > 0) { -+ ret = __mutex_lock_check_stamp(lock, ww_ctx); -+ if (ret) -+ break; -+ } -+ - raw_spin_unlock_irq(&lock->wait_lock); - - debug_rt_mutex_print_deadlock(waiter); -@@ -1575,16 +1617,106 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, - } - } - -+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, -+ struct ww_acquire_ctx *ww_ctx) -+{ -+#ifdef CONFIG_DEBUG_MUTEXES -+ /* -+ * If this WARN_ON triggers, you used ww_mutex_lock to acquire, -+ * but released with a normal mutex_unlock in this call. -+ * -+ * This should never happen, always use ww_mutex_unlock. -+ */ -+ DEBUG_LOCKS_WARN_ON(ww->ctx); -+ -+ /* -+ * Not quite done after calling ww_acquire_done() ? -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); -+ -+ if (ww_ctx->contending_lock) { -+ /* -+ * After -EDEADLK you tried to -+ * acquire a different ww_mutex? Bad! -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); -+ -+ /* -+ * You called ww_mutex_lock after receiving -EDEADLK, -+ * but 'forgot' to unlock everything else first? -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); -+ ww_ctx->contending_lock = NULL; -+ } -+ -+ /* -+ * Naughty, using a different class will lead to undefined behavior! 
-+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); -+#endif -+ ww_ctx->acquired++; -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void ww_mutex_account_lock(struct rt_mutex *lock, -+ struct ww_acquire_ctx *ww_ctx) -+{ -+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); -+ struct rt_mutex_waiter *waiter, *n; -+ -+ /* -+ * This branch gets optimized out for the common case, -+ * and is only important for ww_mutex_lock. -+ */ -+ ww_mutex_lock_acquired(ww, ww_ctx); -+ ww->ctx = ww_ctx; -+ -+ /* -+ * Give any possible sleeping processes the chance to wake up, -+ * so they can recheck if they have to back off. -+ */ -+ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters.rb_root, -+ tree_entry) { -+ /* XXX debug rt mutex waiter wakeup */ -+ -+ BUG_ON(waiter->lock != lock); -+ rt_mutex_wake_waiter(waiter); -+ } -+} -+ -+#else -+ -+static void ww_mutex_account_lock(struct rt_mutex *lock, -+ struct ww_acquire_ctx *ww_ctx) -+{ -+ BUG(); -+} -+#endif -+ - int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx, - struct rt_mutex_waiter *waiter) - { - int ret; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (ww_ctx) { -+ struct ww_mutex *ww; -+ -+ ww = container_of(lock, struct ww_mutex, base.lock); -+ if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) -+ return -EALREADY; -+ } -+#endif -+ - /* Try to acquire the lock again: */ -- if (try_to_take_rt_mutex(lock, current, NULL)) -+ if (try_to_take_rt_mutex(lock, current, NULL)) { -+ if (ww_ctx) -+ ww_mutex_account_lock(lock, ww_ctx); - return 0; -+ } - - set_current_state(state); - -@@ -1594,14 +1726,24 @@ int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, - - ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk); - -- if (likely(!ret)) -+ if (likely(!ret)) { - /* sleep on the mutex */ -- ret = __rt_mutex_slowlock(lock, state, timeout, waiter); -+ ret = __rt_mutex_slowlock(lock, state, timeout, waiter, -+ ww_ctx); -+ } else if (ww_ctx) { -+ /* ww_mutex received EDEADLK, let it become EALREADY */ -+ ret = __mutex_lock_check_stamp(lock, ww_ctx); -+ BUG_ON(!ret); -+ } - - if (unlikely(ret)) { - __set_current_state(TASK_RUNNING); - remove_waiter(lock, waiter); -- rt_mutex_handle_deadlock(ret, chwalk, waiter); -+ /* ww_mutex wants to report EDEADLK/EALREADY, let it */ -+ if (!ww_ctx) -+ rt_mutex_handle_deadlock(ret, chwalk, waiter); -+ } else if (ww_ctx) { -+ ww_mutex_account_lock(lock, ww_ctx); - } - - /* -@@ -1618,7 +1760,8 @@ int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, - static int __sched - rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- enum rtmutex_chainwalk chwalk) -+ enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx) - { - struct rt_mutex_waiter waiter; - unsigned long flags; -@@ -1636,7 +1779,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, - */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); - -- ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, &waiter); -+ ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx, -+ &waiter); - - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - -@@ -1766,29 +1910,33 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, - */ - static inline int - rt_mutex_fastlock(struct rt_mutex *lock, int state, -+ struct ww_acquire_ctx *ww_ctx, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, 
-- enum rtmutex_chainwalk chwalk)) -+ enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx)) - { - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 0; - -- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); -+ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx); - } - - static inline int - rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- enum rtmutex_chainwalk chwalk)) -+ enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx)) - { - if (chwalk == RT_MUTEX_MIN_CHAINWALK && - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 0; - -- return slowfn(lock, state, timeout, chwalk); -+ return slowfn(lock, state, timeout, chwalk, ww_ctx); - } - - static inline int -@@ -1833,7 +1981,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock, - int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state) - { - might_sleep(); -- return rt_mutex_fastlock(lock, state, rt_mutex_slowlock); -+ return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock); - } - - /** -@@ -1953,6 +2101,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, - RT_MUTEX_MIN_CHAINWALK, -+ NULL, - rt_mutex_slowlock); - if (ret) - mutex_release(&lock->dep_map, 1, _RET_IP_); -@@ -2300,7 +2449,7 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, - raw_spin_lock_irq(&lock->wait_lock); - /* sleep on the mutex */ - set_current_state(TASK_INTERRUPTIBLE); -- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); -+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL); - /* - * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might - * have to fix that up. -@@ -2385,3 +2534,99 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, - - return cleanup; - } -+ -+static inline int -+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) -+{ -+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH -+ unsigned tmp; -+ -+ if (ctx->deadlock_inject_countdown-- == 0) { -+ tmp = ctx->deadlock_inject_interval; -+ if (tmp > UINT_MAX/4) -+ tmp = UINT_MAX; -+ else -+ tmp = tmp*2 + tmp + tmp/2; -+ -+ ctx->deadlock_inject_interval = tmp; -+ ctx->deadlock_inject_countdown = tmp; -+ ctx->contending_lock = lock; -+ -+ ww_mutex_unlock(lock); -+ -+ return -EDEADLK; -+ } -+#endif -+ -+ return 0; -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+int __sched -+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) -+{ -+ int ret; -+ -+ might_sleep(); -+ -+ mutex_acquire_nest(&lock->base.dep_map, 0, 0, -+ ctx ? &ctx->dep_map : NULL, _RET_IP_); -+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, -+ ctx); -+ if (ret) -+ mutex_release(&lock->base.dep_map, 1, _RET_IP_); -+ else if (!ret && ctx && ctx->acquired > 1) -+ return ww_mutex_deadlock_injection(lock, ctx); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible); -+ -+int __sched -+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) -+{ -+ int ret; -+ -+ might_sleep(); -+ -+ mutex_acquire_nest(&lock->base.dep_map, 0, 0, -+ ctx ? 
&ctx->dep_map : NULL, _RET_IP_); -+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, -+ ctx); -+ if (ret) -+ mutex_release(&lock->base.dep_map, 1, _RET_IP_); -+ else if (!ret && ctx && ctx->acquired > 1) -+ return ww_mutex_deadlock_injection(lock, ctx); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(ww_mutex_lock); -+ -+void __sched ww_mutex_unlock(struct ww_mutex *lock) -+{ -+ int nest = !!lock->ctx; -+ -+ /* -+ * The unlocking fastpath is the 0->1 transition from 'locked' -+ * into 'unlocked' state: -+ */ -+ if (nest) { -+#ifdef CONFIG_DEBUG_MUTEXES -+ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); -+#endif -+ if (lock->ctx->acquired > 0) -+ lock->ctx->acquired--; -+ lock->ctx = NULL; -+ } -+ -+ mutex_release(&lock->base.dep_map, nest, _RET_IP_); -+ __rt_mutex_unlock(&lock->base.lock); -+} -+EXPORT_SYMBOL(ww_mutex_unlock); -+ -+int __rt_mutex_owner_current(struct rt_mutex *lock) -+{ -+ return rt_mutex_owner(lock) == current; -+} -+EXPORT_SYMBOL(__rt_mutex_owner_current); -+#endif -diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h -index 6fcf0a3e180d..546aaf058b9e 100644 ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -165,6 +165,7 @@ extern void rt_mutex_postunlock(struct wake_q_head *wake_q, - struct wake_q_head *wake_sleeper_q); - - /* RW semaphore special interface */ -+struct ww_acquire_ctx; - - extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state); - extern int __rt_mutex_trylock(struct rt_mutex *lock); -@@ -172,6 +173,7 @@ extern void __rt_mutex_unlock(struct rt_mutex *lock); - int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx, - struct rt_mutex_waiter *waiter); - void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, -diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c -index 7d3c5cf3d23d..660e22caf709 100644 ---- a/kernel/locking/rwsem-rt.c -+++ b/kernel/locking/rwsem-rt.c -@@ -131,7 +131,7 @@ static int __sched __down_read_common(struct rw_semaphore *sem, int state) - */ - rt_mutex_init_waiter(&waiter, false); - ret = rt_mutex_slowlock_locked(m, state, NULL, RT_MUTEX_MIN_CHAINWALK, -- &waiter); -+ NULL, &waiter); - /* - * The slowlock() above is guaranteed to return with the rtmutex (for - * ret = 0) is now held, so there can't be a writer active. Increment --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch b/kernel/patches-4.19.x-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch deleted file mode 100644 index d72ec241f..000000000 --- a/kernel/patches-4.19.x-rt/0150-kconfig-Add-PREEMPT_RT_FULL.patch +++ /dev/null @@ -1,68 +0,0 @@ -From c924e85c609186a184aecf090ef2251090f84d6b Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 29 Jun 2011 14:58:57 +0200 -Subject: [PATCH 150/328] kconfig: Add PREEMPT_RT_FULL - -Introduce the final symbol for PREEMPT_RT_FULL. 
- -Signed-off-by: Thomas Gleixner ---- - init/Makefile | 2 +- - kernel/Kconfig.preempt | 8 ++++++++ - scripts/mkcompile_h | 4 +++- - 3 files changed, 12 insertions(+), 2 deletions(-) - -diff --git a/init/Makefile b/init/Makefile -index a3e5ce2bcf08..7779232563ae 100644 ---- a/init/Makefile -+++ b/init/Makefile -@@ -34,4 +34,4 @@ silent_chk_compile.h = : - include/generated/compile.h: FORCE - @$($(quiet)chk_compile.h) - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ -- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" -+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" -diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt -index 027db5976c2f..907d72b3ba95 100644 ---- a/kernel/Kconfig.preempt -+++ b/kernel/Kconfig.preempt -@@ -69,6 +69,14 @@ config PREEMPT_RTB - enables changes which are preliminary for the full preemptible - RT kernel. - -+config PREEMPT_RT_FULL -+ bool "Fully Preemptible Kernel (RT)" -+ depends on IRQ_FORCED_THREADING -+ select PREEMPT_RT_BASE -+ select PREEMPT_RCU -+ help -+ All and everything -+ - endchoice - - config PREEMPT_COUNT -diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h -index 87f1fc9801d7..f67b15236936 100755 ---- a/scripts/mkcompile_h -+++ b/scripts/mkcompile_h -@@ -5,7 +5,8 @@ TARGET=$1 - ARCH=$2 - SMP=$3 - PREEMPT=$4 --CC=$5 -+RT=$5 -+CC=$6 - - vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } - -@@ -53,6 +54,7 @@ UTS_VERSION="#$VERSION" - CONFIG_FLAGS="" - if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi - if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi -+if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi - UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" - - # Truncate to maximum length --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch b/kernel/patches-4.19.x-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch deleted file mode 100644 index 25bf2e083..000000000 --- a/kernel/patches-4.19.x-rt/0151-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch +++ /dev/null @@ -1,80 +0,0 @@ -From a0a7e75d70aae3240d9a6514b4fcfa1d7e1c9c1b Mon Sep 17 00:00:00 2001 -From: Mikulas Patocka -Date: Mon, 13 Nov 2017 12:56:53 -0500 -Subject: [PATCH 151/328] locking/rt-mutex: fix deadlock in device mapper / - block-IO - -When some block device driver creates a bio and submits it to another -block device driver, the bio is added to current->bio_list (in order to -avoid unbounded recursion). - -However, this queuing of bios can cause deadlocks, in order to avoid them, -device mapper registers a function flush_current_bio_list. This function -is called when device mapper driver blocks. It redirects bios queued on -current->bio_list to helper workqueues, so that these bios can proceed -even if the driver is blocked. - -The problem with CONFIG_PREEMPT_RT_FULL is that when the device mapper -driver blocks, it won't call flush_current_bio_list (because -tsk_is_pi_blocked returns true in sched_submit_work), so deadlocks in -block device stack can happen. - -Note that we can't call blk_schedule_flush_plug if tsk_is_pi_blocked -returns true - that would cause -BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in -task_blocks_on_rt_mutex when flush_current_bio_list attempts to take a -spinlock. - -So the proper fix is to call blk_schedule_flush_plug in rt_mutex_fastlock, -when fast acquire failed and when the task is about to block. 
- -CC: stable-rt@vger.kernel.org -[bigeasy: The deadlock is not device-mapper specific, it can also occur - in plain EXT4] -Signed-off-by: Mikulas Patocka -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rtmutex.c | 13 +++++++++++++ - 1 file changed, 13 insertions(+) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 1f2dc2dfe2e7..b38c3a92dce8 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - - #include "rtmutex_common.h" - -@@ -1919,6 +1920,15 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state, - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 0; - -+ /* -+ * If rt_mutex blocks, the function sched_submit_work will not call -+ * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true). -+ * We must call blk_schedule_flush_plug here, if we don't call it, -+ * a deadlock in I/O may happen. -+ */ -+ if (unlikely(blk_needs_flush_plug(current))) -+ blk_schedule_flush_plug(current); -+ - return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx); - } - -@@ -1936,6 +1946,9 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 0; - -+ if (unlikely(blk_needs_flush_plug(current))) -+ blk_schedule_flush_plug(current); -+ - return slowfn(lock, state, timeout, chwalk, ww_ctx); - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch b/kernel/patches-4.19.x-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch deleted file mode 100644 index d063c72cf..000000000 --- a/kernel/patches-4.19.x-rt/0152-locking-rt-mutex-Flush-block-plug-on-__down_read.patch +++ /dev/null @@ -1,45 +0,0 @@ -From ea61ac000de8f214ddfdf1ac240ea9776c84c5db Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Fri, 4 Jan 2019 15:33:21 -0500 -Subject: [PATCH 152/328] locking/rt-mutex: Flush block plug on __down_read() - -__down_read() bypasses the rtmutex frontend to call -rt_mutex_slowlock_locked() directly, and thus it needs to call -blk_schedule_flush_flug() itself. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Scott Wood -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rwsem-rt.c | 9 +++++++++ - 1 file changed, 9 insertions(+) - -diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c -index 660e22caf709..f518495bd6cc 100644 ---- a/kernel/locking/rwsem-rt.c -+++ b/kernel/locking/rwsem-rt.c -@@ -1,5 +1,6 @@ - /* - */ -+#include - #include - #include - #include -@@ -87,6 +88,14 @@ static int __sched __down_read_common(struct rw_semaphore *sem, int state) - - if (__down_read_trylock(sem)) - return 0; -+ /* -+ * If rt_mutex blocks, the function sched_submit_work will not call -+ * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true). -+ * We must call blk_schedule_flush_plug here, if we don't call it, -+ * a deadlock in I/O may happen. 
-+ */ -+ if (unlikely(blk_needs_flush_plug(current))) -+ blk_schedule_flush_plug(current); - - might_sleep(); - raw_spin_lock_irq(&m->wait_lock); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch b/kernel/patches-4.19.x-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch deleted file mode 100644 index dc0f91983..000000000 --- a/kernel/patches-4.19.x-rt/0153-locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch +++ /dev/null @@ -1,38 +0,0 @@ -From a6f0c5a56834d5fe16591a7fe6e5586f35ca6a00 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 16 Nov 2017 16:48:48 +0100 -Subject: [PATCH 153/328] locking/rtmutex: re-init the wait_lock in - rt_mutex_init_proxy_locked() - -We could provide a key-class for the lockdep (and fixup all callers) or -move the init to all callers (like it was) in order to avoid lockdep -seeing a double-lock of the wait_lock. - -Reported-by: Fernando Lopez-Lezcano -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/locking/rtmutex.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index b38c3a92dce8..94788662b2f2 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -2281,6 +2281,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner) - { - __rt_mutex_init(lock, NULL, NULL); -+#ifdef CONFIG_DEBUG_SPINLOCK -+ /* -+ * get another key class for the wait_lock. LOCK_PI and UNLOCK_PI is -+ * holding the ->wait_lock of the proxy_lock while unlocking a sleeping -+ * lock. -+ */ -+ raw_spin_lock_init(&lock->wait_lock); -+#endif - debug_rt_mutex_proxy_lock(lock, proxy_owner); - rt_mutex_set_owner(lock, proxy_owner); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/kernel/patches-4.19.x-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch deleted file mode 100644 index b2b9b70d7..000000000 --- a/kernel/patches-4.19.x-rt/0154-ptrace-fix-ptrace-vs-tasklist_lock-race.patch +++ /dev/null @@ -1,165 +0,0 @@ -From e5ddc20b573d59f3760489cedf7080c8634ae241 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 29 Aug 2013 18:21:04 +0200 -Subject: [PATCH 154/328] ptrace: fix ptrace vs tasklist_lock race - -As explained by Alexander Fyodorov : - -|read_lock(&tasklist_lock) in ptrace_stop() is converted to mutex on RT kernel, -|and it can remove __TASK_TRACED from task->state (by moving it to -|task->saved_state). If parent does wait() on child followed by a sys_ptrace -|call, the following race can happen: -| -|- child sets __TASK_TRACED in ptrace_stop() -|- parent does wait() which eventually calls wait_task_stopped() and returns -| child's pid -|- child blocks on read_lock(&tasklist_lock) in ptrace_stop() and moves -| __TASK_TRACED flag to saved_state -|- parent calls sys_ptrace, which calls ptrace_check_attach() and wait_task_inactive() - -The patch is based on his initial patch where an additional check is -added in case the __TASK_TRACED moved to ->saved_state. The pi_lock is -taken in case the caller is interrupted between looking into ->state and -->saved_state. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/sched.h | 49 +++++++++++++++++++++++++++++++++++++++---- - kernel/ptrace.c | 9 +++++++- - kernel/sched/core.c | 17 +++++++++++++-- - 3 files changed, 68 insertions(+), 7 deletions(-) - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 527d04f9163e..dd47fd913997 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -101,12 +101,8 @@ struct task_group; - __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ - TASK_PARKED) - --#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) -- - #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) - --#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) -- - #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FROZEN) == 0 && \ - (task->state & TASK_NOLOAD) == 0) -@@ -1717,6 +1713,51 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) - return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); - } - -+static inline bool __task_is_stopped_or_traced(struct task_struct *task) -+{ -+ if (task->state & (__TASK_STOPPED | __TASK_TRACED)) -+ return true; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) -+ return true; -+#endif -+ return false; -+} -+ -+static inline bool task_is_stopped_or_traced(struct task_struct *task) -+{ -+ bool traced_stopped; -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&task->pi_lock, flags); -+ traced_stopped = __task_is_stopped_or_traced(task); -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); -+#else -+ traced_stopped = __task_is_stopped_or_traced(task); -+#endif -+ return traced_stopped; -+} -+ -+static inline bool task_is_traced(struct task_struct *task) -+{ -+ bool traced = false; -+ -+ if (task->state & __TASK_TRACED) -+ return true; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* in case the task is sleeping on tasklist_lock */ -+ raw_spin_lock_irq(&task->pi_lock); -+ if (task->state & __TASK_TRACED) -+ traced = true; -+ else if (task->saved_state & __TASK_TRACED) -+ traced = true; -+ raw_spin_unlock_irq(&task->pi_lock); -+#endif -+ return traced; -+} -+ - /* - * cond_resched() and cond_resched_lock(): latency reduction via - * explicit rescheduling in places that are safe. 
The return -diff --git a/kernel/ptrace.c b/kernel/ptrace.c -index b93eb4eaf7ac..a38b304fb9fd 100644 ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -174,7 +174,14 @@ static bool ptrace_freeze_traced(struct task_struct *task) - - spin_lock_irq(&task->sighand->siglock); - if (task_is_traced(task) && !__fatal_signal_pending(task)) { -- task->state = __TASK_TRACED; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&task->pi_lock, flags); -+ if (task->state & __TASK_TRACED) -+ task->state = __TASK_TRACED; -+ else -+ task->saved_state = __TASK_TRACED; -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); - ret = true; - } - spin_unlock_irq(&task->sighand->siglock); -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 6a0ccaea2b42..6bab30347081 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1349,6 +1349,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p, - } - #endif /* CONFIG_NUMA_BALANCING */ - -+static bool check_task_state(struct task_struct *p, long match_state) -+{ -+ bool match = false; -+ -+ raw_spin_lock_irq(&p->pi_lock); -+ if (p->state == match_state || p->saved_state == match_state) -+ match = true; -+ raw_spin_unlock_irq(&p->pi_lock); -+ -+ return match; -+} -+ - /* - * wait_task_inactive - wait for a thread to unschedule. - * -@@ -1393,7 +1405,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) - * is actually now running somewhere else! - */ - while (task_running(rq, p)) { -- if (match_state && unlikely(p->state != match_state)) -+ if (match_state && !check_task_state(p, match_state)) - return 0; - cpu_relax(); - } -@@ -1408,7 +1420,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) - running = task_running(rq, p); - queued = task_on_rq_queued(p); - ncsw = 0; -- if (!match_state || p->state == match_state) -+ if (!match_state || p->state == match_state || -+ p->saved_state == match_state) - ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ - task_rq_unlock(rq, p, &rf); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0155-rtmutex-annotate-sleeping-lock-context.patch b/kernel/patches-4.19.x-rt/0155-rtmutex-annotate-sleeping-lock-context.patch deleted file mode 100644 index 6734daffe..000000000 --- a/kernel/patches-4.19.x-rt/0155-rtmutex-annotate-sleeping-lock-context.patch +++ /dev/null @@ -1,307 +0,0 @@ -From f030909005b15efe1f934a507d10971242608d81 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 21 Sep 2017 14:25:13 +0200 -Subject: [PATCH 155/328] rtmutex: annotate sleeping lock context - -The RCU code complains on schedule() within a rcu_readlock() section. -The valid scenario on -RT is if a sleeping is held. In order to suppress -the warning the mirgrate_disable counter was used to identify the -invocation of schedule() due to lock contention. - -Grygorii Strashko report that during CPU hotplug we might see the -warning via - rt_spin_lock() -> migrate_disable() -> pin_current_cpu() -> __read_rt_lock() - -because the counter is not yet set. -It is also possible to trigger the warning from cpu_chill() -(seen on a kblockd_mod_delayed_work_on() caller). - -To address this RCU warning I annotate the sleeping lock context. The -counter is incremented before migrate_disable() so the warning Grygorii -should not trigger anymore. Additionally I use that counter in -cpu_chill() to avoid the RCU warning from there. 
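The annotation pattern is small enough to model in plain C. The sketch below is a userspace illustration only and assumes nothing about the real locking internals: sleeping_lock_inc()/sleeping_lock_dec() mirror the helpers introduced by this patch, while the pthread mutex, the nesting counter and the warning check are stand-ins for rt_spin_lock(), rcu_read_lock_nesting and the RCU warning. The counter is bumped before the lock may block, so a context switch seen with a non-zero counter is attributed to sleeping-lock contention and not warned about.

/* Userspace illustration of the sleeping_lock annotation added below --
 * not kernel code. Only the helper names are taken from the patch; the
 * lock, the nesting counter and the warning check are invented models. */
#include <pthread.h>
#include <stdio.h>

static __thread int rcu_nesting;        /* models t->rcu_read_lock_nesting */
static __thread int sleeping_lock;      /* models t->sleeping_lock */

static void sleeping_lock_inc(void) { sleeping_lock++; }
static void sleeping_lock_dec(void) { sleeping_lock--; }

/* Models the WARN_ON_ONCE() in rcu_preempt_note_context_switch():
 * complain about scheduling inside a read-side section unless a
 * sleeping lock is responsible for it. */
static void note_context_switch(void)
{
        if (rcu_nesting > 0 && sleeping_lock == 0)
                fprintf(stderr, "WARN: schedule() inside RCU read-side section\n");
}

static pthread_mutex_t some_sleeping_lock = PTHREAD_MUTEX_INITIALIZER;

static void rt_spin_lock_model(pthread_mutex_t *m)
{
        sleeping_lock_inc();    /* annotate before we may block ... */
        pthread_mutex_lock(m);  /* ... because on RT this can schedule() */
        note_context_switch();
}

static void rt_spin_unlock_model(pthread_mutex_t *m)
{
        pthread_mutex_unlock(m);
        sleeping_lock_dec();
}

int main(void)
{
        rcu_nesting++;                                  /* enter a read-side section */
        rt_spin_lock_model(&some_sleeping_lock);        /* no warning: annotated */
        rt_spin_unlock_model(&some_sleeping_lock);
        rcu_nesting--;
        puts("done");
        return 0;
}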
- -Reported-by: Grygorii Strashko -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/preempt.h | 9 ++++++++ - include/linux/sched.h | 26 ++++++++++++++++++++++ - kernel/locking/rtmutex.c | 12 ++++++++-- - kernel/locking/rwlock-rt.c | 18 +++++++++++---- - kernel/rcu/tree_plugin.h | 6 ++++- - kernel/sched/core.c | 45 ++++++++++++++++++++++++++++++++++++++ - 6 files changed, 109 insertions(+), 7 deletions(-) - -diff --git a/include/linux/preempt.h b/include/linux/preempt.h -index 27c3176d88d2..9eafc34898b4 100644 ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -211,6 +211,15 @@ extern void migrate_enable(void); - - int __migrate_disabled(struct task_struct *p); - -+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) -+ -+extern void migrate_disable(void); -+extern void migrate_enable(void); -+static inline int __migrate_disabled(struct task_struct *p) -+{ -+ return 0; -+} -+ - #else - #define migrate_disable() barrier() - #define migrate_enable() barrier() -diff --git a/include/linux/sched.h b/include/linux/sched.h -index dd47fd913997..682cf84a7e1b 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -673,6 +673,15 @@ struct task_struct { - # ifdef CONFIG_SCHED_DEBUG - int migrate_disable_atomic; - # endif -+ -+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) -+ int migrate_disable; -+# ifdef CONFIG_SCHED_DEBUG -+ int migrate_disable_atomic; -+# endif -+#endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int sleeping_lock; - #endif - - #ifdef CONFIG_PREEMPT_RCU -@@ -1810,6 +1819,23 @@ static __always_inline bool need_resched(void) - return unlikely(tif_need_resched()); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline void sleeping_lock_inc(void) -+{ -+ current->sleeping_lock++; -+} -+ -+static inline void sleeping_lock_dec(void) -+{ -+ current->sleeping_lock--; -+} -+ -+#else -+ -+static inline void sleeping_lock_inc(void) { } -+static inline void sleeping_lock_dec(void) { } -+#endif -+ - /* - * Wrappers for p->thread_info->cpu access. No-op on UP. 
- */ -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 94788662b2f2..2a9bf2443acc 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -1141,6 +1141,7 @@ void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) - - void __lockfunc rt_spin_lock(spinlock_t *lock) - { -+ sleeping_lock_inc(); - migrate_disable(); - spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); - rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); -@@ -1155,6 +1156,7 @@ void __lockfunc __rt_spin_lock(struct rt_mutex *lock) - #ifdef CONFIG_DEBUG_LOCK_ALLOC - void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) - { -+ sleeping_lock_inc(); - migrate_disable(); - spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); -@@ -1168,6 +1170,7 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock) - spin_release(&lock->dep_map, 1, _RET_IP_); - rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); - migrate_enable(); -+ sleeping_lock_dec(); - } - EXPORT_SYMBOL(rt_spin_unlock); - -@@ -1193,12 +1196,15 @@ int __lockfunc rt_spin_trylock(spinlock_t *lock) - { - int ret; - -+ sleeping_lock_inc(); - migrate_disable(); - ret = __rt_mutex_trylock(&lock->lock); -- if (ret) -+ if (ret) { - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -- else -+ } else { - migrate_enable(); -+ sleeping_lock_dec(); -+ } - return ret; - } - EXPORT_SYMBOL(rt_spin_trylock); -@@ -1210,6 +1216,7 @@ int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) - local_bh_disable(); - ret = __rt_mutex_trylock(&lock->lock); - if (ret) { -+ sleeping_lock_inc(); - migrate_disable(); - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - } else -@@ -1225,6 +1232,7 @@ int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) - *flags = 0; - ret = __rt_mutex_trylock(&lock->lock); - if (ret) { -+ sleeping_lock_inc(); - migrate_disable(); - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - } -diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c -index 8f90afe111ce..c3b91205161c 100644 ---- a/kernel/locking/rwlock-rt.c -+++ b/kernel/locking/rwlock-rt.c -@@ -305,12 +305,15 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock) - { - int ret; - -+ sleeping_lock_inc(); - migrate_disable(); - ret = do_read_rt_trylock(rwlock); -- if (ret) -+ if (ret) { - rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); -- else -+ } else { - migrate_enable(); -+ sleeping_lock_dec(); -+ } - return ret; - } - EXPORT_SYMBOL(rt_read_trylock); -@@ -319,18 +322,22 @@ int __lockfunc rt_write_trylock(rwlock_t *rwlock) - { - int ret; - -+ sleeping_lock_inc(); - migrate_disable(); - ret = do_write_rt_trylock(rwlock); -- if (ret) -+ if (ret) { - rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); -- else -+ } else { - migrate_enable(); -+ sleeping_lock_dec(); -+ } - return ret; - } - EXPORT_SYMBOL(rt_write_trylock); - - void __lockfunc rt_read_lock(rwlock_t *rwlock) - { -+ sleeping_lock_inc(); - migrate_disable(); - rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); - do_read_rt_lock(rwlock); -@@ -339,6 +346,7 @@ EXPORT_SYMBOL(rt_read_lock); - - void __lockfunc rt_write_lock(rwlock_t *rwlock) - { -+ sleeping_lock_inc(); - migrate_disable(); - rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); - do_write_rt_lock(rwlock); -@@ -350,6 +358,7 @@ void __lockfunc rt_read_unlock(rwlock_t *rwlock) - rwlock_release(&rwlock->dep_map, 1, _RET_IP_); - do_read_rt_unlock(rwlock); - migrate_enable(); -+ sleeping_lock_dec(); - } - EXPORT_SYMBOL(rt_read_unlock); - -@@ -358,6 
+367,7 @@ void __lockfunc rt_write_unlock(rwlock_t *rwlock) - rwlock_release(&rwlock->dep_map, 1, _RET_IP_); - do_write_rt_unlock(rwlock); - migrate_enable(); -+ sleeping_lock_dec(); - } - EXPORT_SYMBOL(rt_write_unlock); - -diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h -index 5f6de49dc78e..35f3552b7463 100644 ---- a/kernel/rcu/tree_plugin.h -+++ b/kernel/rcu/tree_plugin.h -@@ -337,9 +337,13 @@ static void rcu_preempt_note_context_switch(bool preempt) - struct task_struct *t = current; - struct rcu_data *rdp; - struct rcu_node *rnp; -+ int sleeping_l = 0; - - lockdep_assert_irqs_disabled(); -- WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0); -+#if defined(CONFIG_PREEMPT_RT_FULL) -+ sleeping_l = t->sleeping_lock; -+#endif -+ WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l); - if (t->rcu_read_lock_nesting > 0 && - !t->rcu_read_unlock_special.b.blocked) { - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 6bab30347081..d49580cb0eb2 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7354,4 +7354,49 @@ void migrate_enable(void) - preempt_enable(); - } - EXPORT_SYMBOL(migrate_enable); -+ -+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) -+void migrate_disable(void) -+{ -+ struct task_struct *p = current; -+ -+ if (in_atomic() || irqs_disabled()) { -+#ifdef CONFIG_SCHED_DEBUG -+ p->migrate_disable_atomic++; -+#endif -+ return; -+ } -+#ifdef CONFIG_SCHED_DEBUG -+ if (unlikely(p->migrate_disable_atomic)) { -+ tracing_off(); -+ WARN_ON_ONCE(1); -+ } -+#endif -+ -+ p->migrate_disable++; -+} -+EXPORT_SYMBOL(migrate_disable); -+ -+void migrate_enable(void) -+{ -+ struct task_struct *p = current; -+ -+ if (in_atomic() || irqs_disabled()) { -+#ifdef CONFIG_SCHED_DEBUG -+ p->migrate_disable_atomic--; -+#endif -+ return; -+ } -+ -+#ifdef CONFIG_SCHED_DEBUG -+ if (unlikely(p->migrate_disable_atomic)) { -+ tracing_off(); -+ WARN_ON_ONCE(1); -+ } -+#endif -+ -+ WARN_ON_ONCE(p->migrate_disable <= 0); -+ p->migrate_disable--; -+} -+EXPORT_SYMBOL(migrate_enable); - #endif --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch b/kernel/patches-4.19.x-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch deleted file mode 100644 index 706d2e736..000000000 --- a/kernel/patches-4.19.x-rt/0156-sched-migrate_disable-fallback-to-preempt_disable-in.patch +++ /dev/null @@ -1,203 +0,0 @@ -From 517e8491cd9ecd425b48019a9303e744ed3200b6 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 5 Jul 2018 14:44:51 +0200 -Subject: [PATCH 156/328] sched/migrate_disable: fallback to preempt_disable() - instead barrier() - -On SMP + !RT migrate_disable() is still around. It is not part of spin_lock() -anymore so it has almost no users. However the futex code has a workaround for -the !in_atomic() part of migrate disable which fails because the matching -migrade_disable() is no longer part of spin_lock(). - -On !SMP + !RT migrate_disable() is reduced to barrier(). This is not optimal -because we few spots where a "preempt_disable()" statement was replaced with -"migrate_disable()". - -We also used the migration_disable counter to figure out if a sleeping lock is -acquired so RCU does not complain about schedule() during rcu_read_lock() while -a sleeping lock is held. This changed, we no longer use it, we have now a -sleeping_lock counter for the RCU purpose. 
- -This means we can now: -- for SMP + RT_BASE - full migration program, nothing changes here - -- for !SMP + RT_BASE - the migration counting is no longer required. It used to ensure that the task - is not migrated to another CPU and that this CPU remains online. !SMP ensures - that already. - Move it to CONFIG_SCHED_DEBUG so the counting is done for debugging purpose - only. - -- for all other cases including !RT - fallback to preempt_disable(). The only remaining users of migrate_disable() - are those which were converted from preempt_disable() and the futex - workaround which is already in the preempt_disable() section due to the - spin_lock that is held. - -Cc: stable-rt@vger.kernel.org -Reported-by: joe.korty@concurrent-rt.com -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/preempt.h | 6 +++--- - include/linux/sched.h | 4 ++-- - kernel/sched/core.c | 23 +++++++++++------------ - kernel/sched/debug.c | 2 +- - 4 files changed, 17 insertions(+), 18 deletions(-) - -diff --git a/include/linux/preempt.h b/include/linux/preempt.h -index 9eafc34898b4..ed8413e7140f 100644 ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -204,7 +204,7 @@ do { \ - - #define preemptible() (preempt_count() == 0 && !irqs_disabled()) - --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - - extern void migrate_disable(void); - extern void migrate_enable(void); -@@ -221,8 +221,8 @@ static inline int __migrate_disabled(struct task_struct *p) - } - - #else --#define migrate_disable() barrier() --#define migrate_enable() barrier() -+#define migrate_disable() preempt_disable() -+#define migrate_enable() preempt_enable() - static inline int __migrate_disabled(struct task_struct *p) - { - return 0; -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 682cf84a7e1b..a38a2c2a8fe4 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -667,7 +667,7 @@ struct task_struct { - int nr_cpus_allowed; - const cpumask_t *cpus_ptr; - cpumask_t cpus_mask; --#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - int migrate_disable; - int migrate_disable_update; - # ifdef CONFIG_SCHED_DEBUG -@@ -675,8 +675,8 @@ struct task_struct { - # endif - - #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) -- int migrate_disable; - # ifdef CONFIG_SCHED_DEBUG -+ int migrate_disable; - int migrate_disable_atomic; - # endif - #endif -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index d49580cb0eb2..3b2664e691de 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1031,7 +1031,7 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma - p->nr_cpus_allowed = cpumask_weight(new_mask); - } - --#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - int __migrate_disabled(struct task_struct *p) - { - return p->migrate_disable; -@@ -1071,7 +1071,7 @@ static void __do_set_cpus_allowed_tail(struct task_struct *p, - - void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) - { --#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - if (__migrate_disabled(p)) { - lockdep_assert_held(&p->pi_lock); - -@@ -1145,7 +1145,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, - if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) - goto out; - --#if defined(CONFIG_PREEMPT_COUNT) 
&& defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - if (__migrate_disabled(p)) { - p->migrate_disable_update = 1; - goto out; -@@ -7210,7 +7210,7 @@ const u32 sched_prio_to_wmult[40] = { - - #undef CREATE_TRACE_POINTS - --#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - - static inline void - update_nr_migratory(struct task_struct *p, long delta) -@@ -7358,45 +7358,44 @@ EXPORT_SYMBOL(migrate_enable); - #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - void migrate_disable(void) - { -+#ifdef CONFIG_SCHED_DEBUG - struct task_struct *p = current; - - if (in_atomic() || irqs_disabled()) { --#ifdef CONFIG_SCHED_DEBUG - p->migrate_disable_atomic++; --#endif - return; - } --#ifdef CONFIG_SCHED_DEBUG -+ - if (unlikely(p->migrate_disable_atomic)) { - tracing_off(); - WARN_ON_ONCE(1); - } --#endif - - p->migrate_disable++; -+#endif -+ barrier(); - } - EXPORT_SYMBOL(migrate_disable); - - void migrate_enable(void) - { -+#ifdef CONFIG_SCHED_DEBUG - struct task_struct *p = current; - - if (in_atomic() || irqs_disabled()) { --#ifdef CONFIG_SCHED_DEBUG - p->migrate_disable_atomic--; --#endif - return; - } - --#ifdef CONFIG_SCHED_DEBUG - if (unlikely(p->migrate_disable_atomic)) { - tracing_off(); - WARN_ON_ONCE(1); - } --#endif - - WARN_ON_ONCE(p->migrate_disable <= 0); - p->migrate_disable--; -+#endif -+ barrier(); - } - EXPORT_SYMBOL(migrate_enable); - #endif -diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c -index 5027158d3908..dd6c364d6f01 100644 ---- a/kernel/sched/debug.c -+++ b/kernel/sched/debug.c -@@ -982,7 +982,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, - P(dl.runtime); - P(dl.deadline); - } --#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - P(migrate_disable); - #endif - P(nr_cpus_allowed); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/kernel/patches-4.19.x-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch deleted file mode 100644 index 3a4441ed3..000000000 --- a/kernel/patches-4.19.x-rt/0157-locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch +++ /dev/null @@ -1,181 +0,0 @@ -From f4b5fe79d518718706341e443c11b7c5fd0f7bd4 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 4 Aug 2017 17:40:42 +0200 -Subject: [PATCH 157/328] locking: don't check for __LINUX_SPINLOCK_TYPES_H on - -RT archs - -Upstream uses arch_spinlock_t within spinlock_t and requests that -spinlock_types.h header file is included first. -On -RT we have the rt_mutex with its raw_lock wait_lock which needs -architectures' spinlock_types.h header file for its definition. However -we need rt_mutex first because it is used to build the spinlock_t so -that check does not work for us. -Therefore I am dropping that check. 
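The ordering problem is purely one of type nesting and can be shown in a few lines. The sketch below is a stand-alone illustration, not the real headers: the struct names are invented models and the upstream guard is reproduced only as a comment. It shows the dependency chain that forces the arch spinlock type to be visible before the RT spinlock_t can be defined, which is why the direct-inclusion check has to go.

/* Single-file illustration of why the include-order guard is dropped.
 * The guard text is quoted as a comment; the types below are models
 * invented for this example, not the kernel definitions. */
#include <stdio.h>

/* Upstream arch/.../spinlock_types.h begins with:
 *   #ifndef __LINUX_SPINLOCK_TYPES_H
 *   # error "please don't include this file directly"
 *   #endif
 * i.e. it assumes linux/spinlock_types.h is always included first.
 * On RT the rt_mutex that underlies spinlock_t needs the arch-level
 * type before that header has defined the guard macro, so the #error
 * would fire; dropping the check lets this ordering build: */

typedef struct {
        volatile unsigned int lock;     /* model of arch_spinlock_t */
} arch_spinlock_t;

struct rt_mutex_model {                 /* stand-in for struct rt_mutex */
        arch_spinlock_t wait_lock;      /* needs arch_spinlock_t first */
};

typedef struct {                        /* stand-in for RT's spinlock_t */
        struct rt_mutex_model lock;
} spinlock_model_t;

int main(void)
{
        spinlock_model_t l = { .lock = { .wait_lock = { .lock = 0 } } };

        printf("nested type builds once the guard is gone: %u\n",
               l.lock.wait_lock.lock);
        return 0;
}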
- -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/alpha/include/asm/spinlock_types.h | 4 ---- - arch/arm/include/asm/spinlock_types.h | 4 ---- - arch/arm64/include/asm/spinlock_types.h | 4 ---- - arch/hexagon/include/asm/spinlock_types.h | 4 ---- - arch/ia64/include/asm/spinlock_types.h | 4 ---- - arch/powerpc/include/asm/spinlock_types.h | 4 ---- - arch/s390/include/asm/spinlock_types.h | 4 ---- - arch/sh/include/asm/spinlock_types.h | 4 ---- - arch/xtensa/include/asm/spinlock_types.h | 4 ---- - include/linux/spinlock_types_up.h | 4 ---- - 10 files changed, 40 deletions(-) - -diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h -index 1d5716bc060b..6883bc952d22 100644 ---- a/arch/alpha/include/asm/spinlock_types.h -+++ b/arch/alpha/include/asm/spinlock_types.h -@@ -2,10 +2,6 @@ - #ifndef _ALPHA_SPINLOCK_TYPES_H - #define _ALPHA_SPINLOCK_TYPES_H - --#ifndef __LINUX_SPINLOCK_TYPES_H --# error "please don't include this file directly" --#endif -- - typedef struct { - volatile unsigned int lock; - } arch_spinlock_t; -diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h -index 5976958647fe..a37c0803954b 100644 ---- a/arch/arm/include/asm/spinlock_types.h -+++ b/arch/arm/include/asm/spinlock_types.h -@@ -2,10 +2,6 @@ - #ifndef __ASM_SPINLOCK_TYPES_H - #define __ASM_SPINLOCK_TYPES_H - --#ifndef __LINUX_SPINLOCK_TYPES_H --# error "please don't include this file directly" --#endif -- - #define TICKET_SHIFT 16 - - typedef struct { -diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h -index a157ff465e27..f952fdda8346 100644 ---- a/arch/arm64/include/asm/spinlock_types.h -+++ b/arch/arm64/include/asm/spinlock_types.h -@@ -16,10 +16,6 @@ - #ifndef __ASM_SPINLOCK_TYPES_H - #define __ASM_SPINLOCK_TYPES_H - --#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) --# error "please don't include this file directly" --#endif -- - #include - #include - -diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h -index 7a906b5214a4..d8f596fec022 100644 ---- a/arch/hexagon/include/asm/spinlock_types.h -+++ b/arch/hexagon/include/asm/spinlock_types.h -@@ -21,10 +21,6 @@ - #ifndef _ASM_SPINLOCK_TYPES_H - #define _ASM_SPINLOCK_TYPES_H - --#ifndef __LINUX_SPINLOCK_TYPES_H --# error "please don't include this file directly" --#endif -- - typedef struct { - volatile unsigned int lock; - } arch_spinlock_t; -diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h -index 6e345fefcdca..681408d6816f 100644 ---- a/arch/ia64/include/asm/spinlock_types.h -+++ b/arch/ia64/include/asm/spinlock_types.h -@@ -2,10 +2,6 @@ - #ifndef _ASM_IA64_SPINLOCK_TYPES_H - #define _ASM_IA64_SPINLOCK_TYPES_H - --#ifndef __LINUX_SPINLOCK_TYPES_H --# error "please don't include this file directly" --#endif -- - typedef struct { - volatile unsigned int lock; - } arch_spinlock_t; -diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h -index 87adaf13b7e8..7305cb6a53e4 100644 ---- a/arch/powerpc/include/asm/spinlock_types.h -+++ b/arch/powerpc/include/asm/spinlock_types.h -@@ -2,10 +2,6 @@ - #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H - #define _ASM_POWERPC_SPINLOCK_TYPES_H - --#ifndef __LINUX_SPINLOCK_TYPES_H --# error "please don't include this file directly" --#endif -- - typedef struct { - volatile unsigned int slock; - } arch_spinlock_t; -diff --git 
a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h -index cfed272e4fd5..8e28e8176ec8 100644 ---- a/arch/s390/include/asm/spinlock_types.h -+++ b/arch/s390/include/asm/spinlock_types.h -@@ -2,10 +2,6 @@ - #ifndef __ASM_SPINLOCK_TYPES_H - #define __ASM_SPINLOCK_TYPES_H - --#ifndef __LINUX_SPINLOCK_TYPES_H --# error "please don't include this file directly" --#endif -- - typedef struct { - int lock; - } __attribute__ ((aligned (4))) arch_spinlock_t; -diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h -index e82369f286a2..22ca9a98bbb8 100644 ---- a/arch/sh/include/asm/spinlock_types.h -+++ b/arch/sh/include/asm/spinlock_types.h -@@ -2,10 +2,6 @@ - #ifndef __ASM_SH_SPINLOCK_TYPES_H - #define __ASM_SH_SPINLOCK_TYPES_H - --#ifndef __LINUX_SPINLOCK_TYPES_H --# error "please don't include this file directly" --#endif -- - typedef struct { - volatile unsigned int lock; - } arch_spinlock_t; -diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h -index bb1fe6c1816e..8a22f1e7b6c9 100644 ---- a/arch/xtensa/include/asm/spinlock_types.h -+++ b/arch/xtensa/include/asm/spinlock_types.h -@@ -2,10 +2,6 @@ - #ifndef __ASM_SPINLOCK_TYPES_H - #define __ASM_SPINLOCK_TYPES_H - --#ifndef __LINUX_SPINLOCK_TYPES_H --# error "please don't include this file directly" --#endif -- - typedef struct { - volatile unsigned int slock; - } arch_spinlock_t; -diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h -index c09b6407ae1b..b0243ba07fb7 100644 ---- a/include/linux/spinlock_types_up.h -+++ b/include/linux/spinlock_types_up.h -@@ -1,10 +1,6 @@ - #ifndef __LINUX_SPINLOCK_TYPES_UP_H - #define __LINUX_SPINLOCK_TYPES_UP_H - --#ifndef __LINUX_SPINLOCK_TYPES_H --# error "please don't include this file directly" --#endif -- - /* - * include/linux/spinlock_types_up.h - spinlock type definitions for UP - * --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0158-rcu-Frob-softirq-test.patch b/kernel/patches-4.19.x-rt/0158-rcu-Frob-softirq-test.patch deleted file mode 100644 index c7ea51819..000000000 --- a/kernel/patches-4.19.x-rt/0158-rcu-Frob-softirq-test.patch +++ /dev/null @@ -1,172 +0,0 @@ -From 2e1b076839800d30698ad2fdfad982acb9acaac9 Mon Sep 17 00:00:00 2001 -From: Peter Zijlstra -Date: Sat, 13 Aug 2011 00:23:17 +0200 -Subject: [PATCH 158/328] rcu: Frob softirq test - -With RT_FULL we get the below wreckage: - -[ 126.060484] ======================================================= -[ 126.060486] [ INFO: possible circular locking dependency detected ] -[ 126.060489] 3.0.1-rt10+ #30 -[ 126.060490] ------------------------------------------------------- -[ 126.060492] irq/24-eth0/1235 is trying to acquire lock: -[ 126.060495] (&(lock)->wait_lock#2){+.+...}, at: [] rt_mutex_slowunlock+0x16/0x55 -[ 126.060503] -[ 126.060504] but task is already holding lock: -[ 126.060506] (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 -[ 126.060511] -[ 126.060511] which lock already depends on the new lock. 
-[ 126.060513] -[ 126.060514] -[ 126.060514] the existing dependency chain (in reverse order) is: -[ 126.060516] -[ 126.060516] -> #1 (&p->pi_lock){-...-.}: -[ 126.060519] [] lock_acquire+0x145/0x18a -[ 126.060524] [] _raw_spin_lock_irqsave+0x4b/0x85 -[ 126.060527] [] task_blocks_on_rt_mutex+0x36/0x20f -[ 126.060531] [] rt_mutex_slowlock+0xd1/0x15a -[ 126.060534] [] rt_mutex_lock+0x2d/0x2f -[ 126.060537] [] rcu_boost+0xad/0xde -[ 126.060541] [] rcu_boost_kthread+0x7d/0x9b -[ 126.060544] [] kthread+0x99/0xa1 -[ 126.060547] [] kernel_thread_helper+0x4/0x10 -[ 126.060551] -[ 126.060552] -> #0 (&(lock)->wait_lock#2){+.+...}: -[ 126.060555] [] __lock_acquire+0x1157/0x1816 -[ 126.060558] [] lock_acquire+0x145/0x18a -[ 126.060561] [] _raw_spin_lock+0x40/0x73 -[ 126.060564] [] rt_mutex_slowunlock+0x16/0x55 -[ 126.060566] [] rt_mutex_unlock+0x27/0x29 -[ 126.060569] [] rcu_read_unlock_special+0x17e/0x1c4 -[ 126.060573] [] __rcu_read_unlock+0x48/0x89 -[ 126.060576] [] select_task_rq_rt+0xc7/0xd5 -[ 126.060580] [] try_to_wake_up+0x175/0x429 -[ 126.060583] [] wake_up_process+0x15/0x17 -[ 126.060585] [] wakeup_softirqd+0x24/0x26 -[ 126.060590] [] irq_exit+0x49/0x55 -[ 126.060593] [] smp_apic_timer_interrupt+0x8a/0x98 -[ 126.060597] [] apic_timer_interrupt+0x13/0x20 -[ 126.060600] [] irq_forced_thread_fn+0x1b/0x44 -[ 126.060603] [] irq_thread+0xde/0x1af -[ 126.060606] [] kthread+0x99/0xa1 -[ 126.060608] [] kernel_thread_helper+0x4/0x10 -[ 126.060611] -[ 126.060612] other info that might help us debug this: -[ 126.060614] -[ 126.060615] Possible unsafe locking scenario: -[ 126.060616] -[ 126.060617] CPU0 CPU1 -[ 126.060619] ---- ---- -[ 126.060620] lock(&p->pi_lock); -[ 126.060623] lock(&(lock)->wait_lock); -[ 126.060625] lock(&p->pi_lock); -[ 126.060627] lock(&(lock)->wait_lock); -[ 126.060629] -[ 126.060629] *** DEADLOCK *** -[ 126.060630] -[ 126.060632] 1 lock held by irq/24-eth0/1235: -[ 126.060633] #0: (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 -[ 126.060638] -[ 126.060638] stack backtrace: -[ 126.060641] Pid: 1235, comm: irq/24-eth0 Not tainted 3.0.1-rt10+ #30 -[ 126.060643] Call Trace: -[ 126.060644] [] print_circular_bug+0x289/0x29a -[ 126.060651] [] __lock_acquire+0x1157/0x1816 -[ 126.060655] [] ? trace_hardirqs_off_caller+0x1f/0x99 -[ 126.060658] [] ? rt_mutex_slowunlock+0x16/0x55 -[ 126.060661] [] lock_acquire+0x145/0x18a -[ 126.060664] [] ? rt_mutex_slowunlock+0x16/0x55 -[ 126.060668] [] _raw_spin_lock+0x40/0x73 -[ 126.060671] [] ? rt_mutex_slowunlock+0x16/0x55 -[ 126.060674] [] ? rcu_report_qs_rsp+0x87/0x8c -[ 126.060677] [] rt_mutex_slowunlock+0x16/0x55 -[ 126.060680] [] ? rcu_read_unlock_special+0x9b/0x1c4 -[ 126.060683] [] rt_mutex_unlock+0x27/0x29 -[ 126.060687] [] rcu_read_unlock_special+0x17e/0x1c4 -[ 126.060690] [] __rcu_read_unlock+0x48/0x89 -[ 126.060693] [] select_task_rq_rt+0xc7/0xd5 -[ 126.060696] [] ? select_task_rq_rt+0x27/0xd5 -[ 126.060701] [] ? clockevents_program_event+0x8e/0x90 -[ 126.060704] [] try_to_wake_up+0x175/0x429 -[ 126.060708] [] ? tick_program_event+0x1f/0x21 -[ 126.060711] [] wake_up_process+0x15/0x17 -[ 126.060715] [] wakeup_softirqd+0x24/0x26 -[ 126.060718] [] irq_exit+0x49/0x55 -[ 126.060721] [] smp_apic_timer_interrupt+0x8a/0x98 -[ 126.060724] [] apic_timer_interrupt+0x13/0x20 -[ 126.060726] [] ? migrate_disable+0x75/0x12d -[ 126.060733] [] ? local_bh_disable+0xe/0x1f -[ 126.060736] [] ? local_bh_disable+0x1d/0x1f -[ 126.060739] [] irq_forced_thread_fn+0x1b/0x44 -[ 126.060742] [] ? 
_raw_spin_unlock_irq+0x3b/0x59 -[ 126.060745] [] irq_thread+0xde/0x1af -[ 126.060748] [] ? irq_thread_fn+0x3a/0x3a -[ 126.060751] [] ? irq_finalize_oneshot+0xd1/0xd1 -[ 126.060754] [] ? irq_finalize_oneshot+0xd1/0xd1 -[ 126.060757] [] kthread+0x99/0xa1 -[ 126.060761] [] kernel_thread_helper+0x4/0x10 -[ 126.060764] [] ? finish_task_switch+0x87/0x10a -[ 126.060768] [] ? retint_restore_args+0xe/0xe -[ 126.060771] [] ? __init_kthread_worker+0x8c/0x8c -[ 126.060774] [] ? gs_change+0xb/0xb - -Because irq_exit() does: - -void irq_exit(void) -{ - account_system_vtime(current); - trace_hardirq_exit(); - sub_preempt_count(IRQ_EXIT_OFFSET); - if (!in_interrupt() && local_softirq_pending()) - invoke_softirq(); - - ... -} - -Which triggers a wakeup, which uses RCU, now if the interrupted task has -t->rcu_read_unlock_special set, the rcu usage from the wakeup will end -up in rcu_read_unlock_special(). rcu_read_unlock_special() will test -for in_irq(), which will fail as we just decremented preempt_count -with IRQ_EXIT_OFFSET, and in_sering_softirq(), which for -PREEMPT_RT_FULL reads: - -int in_serving_softirq(void) -{ - int res; - - preempt_disable(); - res = __get_cpu_var(local_softirq_runner) == current; - preempt_enable(); - return res; -} - -Which will thus also fail, resulting in the above wreckage. - -The 'somewhat' ugly solution is to open-code the preempt_count() test -in rcu_read_unlock_special(). - -Also, we're not at all sure how ->rcu_read_unlock_special gets set -here... so this is very likely a bandaid and more thought is required. - -Cc: Paul E. McKenney -Signed-off-by: Peter Zijlstra ---- - kernel/rcu/tree_plugin.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h -index 35f3552b7463..e9ce51e19e87 100644 ---- a/kernel/rcu/tree_plugin.h -+++ b/kernel/rcu/tree_plugin.h -@@ -524,7 +524,7 @@ static void rcu_read_unlock_special(struct task_struct *t) - } - - /* Hardware IRQ handlers cannot block, complain if they get here. */ -- if (in_irq() || in_serving_softirq()) { -+ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { - lockdep_rcu_suspicious(__FILE__, __LINE__, - "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); - pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch b/kernel/patches-4.19.x-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch deleted file mode 100644 index c7562225e..000000000 --- a/kernel/patches-4.19.x-rt/0159-rcu-Merge-RCU-bh-into-RCU-preempt.patch +++ /dev/null @@ -1,350 +0,0 @@ -From 4e202b7ae394bcf87490c85f7edf3c528a9e93ed Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 5 Oct 2011 11:59:38 -0700 -Subject: [PATCH 159/328] rcu: Merge RCU-bh into RCU-preempt - -The Linux kernel has long RCU-bh read-side critical sections that -intolerably increase scheduling latency under mainline's RCU-bh rules, -which include RCU-bh read-side critical sections being non-preemptible. -This patch therefore arranges for RCU-bh to be implemented in terms of -RCU-preempt for CONFIG_PREEMPT_RT_FULL=y. - -This has the downside of defeating the purpose of RCU-bh, namely, -handling the case where the system is subjected to a network-based -denial-of-service attack that keeps at least one CPU doing full-time -softirq processing. This issue will be fixed by a later commit. 
- -The current commit will need some work to make it appropriate for -mainline use, for example, it needs to be extended to cover Tiny RCU. - -[ paulmck: Added a useful changelog ] - -Signed-off-by: Thomas Gleixner -Signed-off-by: Paul E. McKenney -Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com -Signed-off-by: Thomas Gleixner ---- - include/linux/rcupdate.h | 19 +++++++++++++++++++ - include/linux/rcutree.h | 8 ++++++++ - kernel/rcu/rcu.h | 11 +++++++++-- - kernel/rcu/rcutorture.c | 7 +++++++ - kernel/rcu/tree.c | 26 ++++++++++++++++++++++++++ - kernel/rcu/tree.h | 2 ++ - kernel/rcu/update.c | 2 ++ - 7 files changed, 73 insertions(+), 2 deletions(-) - -diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h -index e6733d7911e9..08d64e5713fc 100644 ---- a/include/linux/rcupdate.h -+++ b/include/linux/rcupdate.h -@@ -56,7 +56,11 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func); - #define call_rcu call_rcu_sched - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - -+#ifdef CONFIG_PREEMPT_RT_FULL -+#define call_rcu_bh call_rcu -+#else - void call_rcu_bh(struct rcu_head *head, rcu_callback_t func); -+#endif - void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); - void synchronize_sched(void); - void rcu_barrier_tasks(void); -@@ -261,7 +265,14 @@ extern struct lockdep_map rcu_sched_lock_map; - extern struct lockdep_map rcu_callback_map; - int debug_lockdep_rcu_enabled(void); - int rcu_read_lock_held(void); -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline int rcu_read_lock_bh_held(void) -+{ -+ return rcu_read_lock_held(); -+} -+#else - int rcu_read_lock_bh_held(void); -+#endif - int rcu_read_lock_sched_held(void); - - #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -@@ -661,10 +672,14 @@ static inline void rcu_read_unlock(void) - static inline void rcu_read_lock_bh(void) - { - local_bh_disable(); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ rcu_read_lock(); -+#else - __acquire(RCU_BH); - rcu_lock_acquire(&rcu_bh_lock_map); - RCU_LOCKDEP_WARN(!rcu_is_watching(), - "rcu_read_lock_bh() used illegally while idle"); -+#endif - } - - /* -@@ -674,10 +689,14 @@ static inline void rcu_read_lock_bh(void) - */ - static inline void rcu_read_unlock_bh(void) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ rcu_read_unlock(); -+#else - RCU_LOCKDEP_WARN(!rcu_is_watching(), - "rcu_read_unlock_bh() used illegally while idle"); - rcu_lock_release(&rcu_bh_lock_map); - __release(RCU_BH); -+#endif - local_bh_enable(); - } - -diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h -index 914655848ef6..462ce061bac7 100644 ---- a/include/linux/rcutree.h -+++ b/include/linux/rcutree.h -@@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu) - rcu_note_context_switch(false); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define synchronize_rcu_bh synchronize_rcu -+#else - void synchronize_rcu_bh(void); -+#endif - void synchronize_sched_expedited(void); - void synchronize_rcu_expedited(void); - -@@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void) - } - - void rcu_barrier(void); -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define rcu_barrier_bh rcu_barrier -+#else - void rcu_barrier_bh(void); -+#endif - void rcu_barrier_sched(void); - bool rcu_eqs_special_set(int cpu); - unsigned long get_state_synchronize_rcu(void); -diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h -index 4d04683c31b2..808cce9a5d43 100644 ---- a/kernel/rcu/rcu.h -+++ b/kernel/rcu/rcu.h -@@ -528,7 +528,6 @@ static inline void show_rcu_gp_kthreads(void) { } - static inline int 
rcu_get_gp_kthreads_prio(void) { return 0; } - #else /* #ifdef CONFIG_TINY_RCU */ - unsigned long rcu_get_gp_seq(void); --unsigned long rcu_bh_get_gp_seq(void); - unsigned long rcu_sched_get_gp_seq(void); - unsigned long rcu_exp_batches_completed(void); - unsigned long rcu_exp_batches_completed_sched(void); -@@ -536,10 +535,18 @@ unsigned long srcu_batches_completed(struct srcu_struct *sp); - void show_rcu_gp_kthreads(void); - int rcu_get_gp_kthreads_prio(void); - void rcu_force_quiescent_state(void); --void rcu_bh_force_quiescent_state(void); - void rcu_sched_force_quiescent_state(void); - extern struct workqueue_struct *rcu_gp_wq; - extern struct workqueue_struct *rcu_par_gp_wq; -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+#define rcu_bh_get_gp_seq rcu_get_gp_seq -+#define rcu_bh_force_quiescent_state rcu_force_quiescent_state -+#else -+unsigned long rcu_bh_get_gp_seq(void); -+void rcu_bh_force_quiescent_state(void); -+#endif -+ - #endif /* #else #ifdef CONFIG_TINY_RCU */ - - #ifdef CONFIG_RCU_NOCB_CPU -diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c -index 0b7af7e2bcbb..e95d121efc80 100644 ---- a/kernel/rcu/rcutorture.c -+++ b/kernel/rcu/rcutorture.c -@@ -434,6 +434,7 @@ static struct rcu_torture_ops rcu_ops = { - .name = "rcu" - }; - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Definitions for rcu_bh torture testing. - */ -@@ -475,6 +476,12 @@ static struct rcu_torture_ops rcu_bh_ops = { - .name = "rcu_bh" - }; - -+#else -+static struct rcu_torture_ops rcu_bh_ops = { -+ .ttype = INVALID_RCU_FLAVOR, -+}; -+#endif -+ - /* - * Don't even think about trying any of these in real life!!! - * The names includes "busted", and they really means it! -diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index f7e89c989df7..1456a3d97971 100644 ---- a/kernel/rcu/tree.c -+++ b/kernel/rcu/tree.c -@@ -244,6 +244,7 @@ void rcu_sched_qs(void) - this_cpu_ptr(&rcu_sched_data), true); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void rcu_bh_qs(void) - { - RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); -@@ -254,6 +255,11 @@ void rcu_bh_qs(void) - __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); - } - } -+#else -+void rcu_bh_qs(void) -+{ -+} -+#endif - - /* - * Steal a bit from the bottom of ->dynticks for idle entry/exit -@@ -568,6 +574,7 @@ unsigned long rcu_sched_get_gp_seq(void) - } - EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Return the number of RCU-bh GPs completed thus far for debug & stats. - */ -@@ -576,6 +583,7 @@ unsigned long rcu_bh_get_gp_seq(void) - return READ_ONCE(rcu_bh_state.gp_seq); - } - EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); -+#endif - - /* - * Return the number of RCU expedited batches completed thus far for -@@ -599,6 +607,7 @@ unsigned long rcu_exp_batches_completed_sched(void) - } - EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Force a quiescent state. - */ -@@ -617,6 +626,13 @@ void rcu_bh_force_quiescent_state(void) - } - EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); - -+#else -+void rcu_force_quiescent_state(void) -+{ -+} -+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); -+#endif -+ - /* - * Force a quiescent state for RCU-sched. 
- */ -@@ -674,9 +690,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, - case RCU_FLAVOR: - rsp = rcu_state_p; - break; -+#ifndef CONFIG_PREEMPT_RT_FULL - case RCU_BH_FLAVOR: - rsp = &rcu_bh_state; - break; -+#endif - case RCU_SCHED_FLAVOR: - rsp = &rcu_sched_state; - break; -@@ -3057,6 +3075,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) - } - EXPORT_SYMBOL_GPL(call_rcu_sched); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. - * @head: structure to be used for queueing the RCU updates. -@@ -3084,6 +3103,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) - __call_rcu(head, func, &rcu_bh_state, -1, 0); - } - EXPORT_SYMBOL_GPL(call_rcu_bh); -+#endif - - /* - * Queue an RCU callback for lazy invocation after a grace period. -@@ -3169,6 +3189,7 @@ void synchronize_sched(void) - } - EXPORT_SYMBOL_GPL(synchronize_sched); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. - * -@@ -3195,6 +3216,7 @@ void synchronize_rcu_bh(void) - wait_rcu_gp(call_rcu_bh); - } - EXPORT_SYMBOL_GPL(synchronize_rcu_bh); -+#endif - - /** - * get_state_synchronize_rcu - Snapshot current RCU state -@@ -3502,6 +3524,7 @@ static void _rcu_barrier(struct rcu_state *rsp) - mutex_unlock(&rsp->barrier_mutex); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. - */ -@@ -3510,6 +3533,7 @@ void rcu_barrier_bh(void) - _rcu_barrier(&rcu_bh_state); - } - EXPORT_SYMBOL_GPL(rcu_barrier_bh); -+#endif - - /** - * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. -@@ -4157,7 +4181,9 @@ void __init rcu_init(void) - - rcu_bootup_announce(); - rcu_init_geometry(); -+#ifndef CONFIG_PREEMPT_RT_FULL - rcu_init_one(&rcu_bh_state); -+#endif - rcu_init_one(&rcu_sched_state); - if (dump_tree) - rcu_dump_rcu_node_tree(&rcu_sched_state); -diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h -index 4e74df768c57..fbbff7c21148 100644 ---- a/kernel/rcu/tree.h -+++ b/kernel/rcu/tree.h -@@ -413,7 +413,9 @@ extern struct list_head rcu_struct_flavors; - */ - extern struct rcu_state rcu_sched_state; - -+#ifndef CONFIG_PREEMPT_RT_FULL - extern struct rcu_state rcu_bh_state; -+#endif - - #ifdef CONFIG_PREEMPT_RCU - extern struct rcu_state rcu_preempt_state; -diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c -index 81688a133552..6ffafb1b1584 100644 ---- a/kernel/rcu/update.c -+++ b/kernel/rcu/update.c -@@ -288,6 +288,7 @@ int rcu_read_lock_held(void) - } - EXPORT_SYMBOL_GPL(rcu_read_lock_held); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? - * -@@ -314,6 +315,7 @@ int rcu_read_lock_bh_held(void) - return in_softirq() || irqs_disabled(); - } - EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); -+#endif - - #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch b/kernel/patches-4.19.x-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch deleted file mode 100644 index b90c3f3bb..000000000 --- a/kernel/patches-4.19.x-rt/0160-rcu-Make-ksoftirqd-do-RCU-quiescent-states.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 2342184568653d14130d3ce56aec7fcf39e5bf5e Mon Sep 17 00:00:00 2001 -From: "Paul E. 
McKenney" -Date: Wed, 5 Oct 2011 11:45:18 -0700 -Subject: [PATCH 160/328] rcu: Make ksoftirqd do RCU quiescent states - -Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable -to network-based denial-of-service attacks. This patch therefore -makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq() -is running in ksoftirqd context. A wrapper layer in interposed so that -other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying -function __do_softirq_common() does the actual work. - -The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is -that there might be a local_bh_enable() inside an RCU-preempt read-side -critical section. This local_bh_enable() can invoke __do_softirq() -directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just -calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be -an illegal RCU-preempt quiescent state in the middle of an RCU-preempt -read-side critical section. Therefore, quiescent states can only happen -in cases where __do_softirq() is invoked directly from ksoftirqd. - -Signed-off-by: Paul E. McKenney -Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com -Signed-off-by: Thomas Gleixner ---- - kernel/rcu/tree.c | 18 +++++++++++++----- - kernel/rcu/tree_plugin.h | 8 +++++++- - 2 files changed, 20 insertions(+), 6 deletions(-) - -diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index 1456a3d97971..1a40e3d44cb8 100644 ---- a/kernel/rcu/tree.c -+++ b/kernel/rcu/tree.c -@@ -244,7 +244,19 @@ void rcu_sched_qs(void) - this_cpu_ptr(&rcu_sched_data), true); - } - --#ifndef CONFIG_PREEMPT_RT_FULL -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void rcu_preempt_qs(void); -+ -+void rcu_bh_qs(void) -+{ -+ unsigned long flags; -+ -+ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */ -+ local_irq_save(flags); -+ rcu_preempt_qs(); -+ local_irq_restore(flags); -+} -+#else - void rcu_bh_qs(void) - { - RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); -@@ -255,10 +267,6 @@ void rcu_bh_qs(void) - __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); - } - } --#else --void rcu_bh_qs(void) --{ --} - #endif - - /* -diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h -index e9ce51e19e87..938e64c69d18 100644 ---- a/kernel/rcu/tree_plugin.h -+++ b/kernel/rcu/tree_plugin.h -@@ -29,6 +29,7 @@ - #include - #include - #include -+#include - #include - #include - #include "../time/tick-internal.h" -@@ -1408,7 +1409,7 @@ static void rcu_prepare_kthreads(int cpu) - - #endif /* #else #ifdef CONFIG_RCU_BOOST */ - --#if !defined(CONFIG_RCU_FAST_NO_HZ) -+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) - - /* - * Check to see if any future RCU-related work will need to be done -@@ -1424,7 +1425,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) - *nextevt = KTIME_MAX; - return rcu_cpu_has_callbacks(NULL); - } -+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ - -+#if !defined(CONFIG_RCU_FAST_NO_HZ) - /* - * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up - * after it. -@@ -1521,6 +1524,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) - return cbs_ready; - } - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - /* - * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready - * to invoke. If the CPU has callbacks, try to advance them. 
Tell the -@@ -1563,6 +1568,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) - *nextevt = basemono + dj * TICK_NSEC; - return 0; - } -+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ - - /* - * Prepare a CPU for idle from an RCU perspective. The first major task --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch b/kernel/patches-4.19.x-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch deleted file mode 100644 index 35cae149e..000000000 --- a/kernel/patches-4.19.x-rt/0161-rcu-Eliminate-softirq-processing-from-rcutree.patch +++ /dev/null @@ -1,422 +0,0 @@ -From 7f9bb7afbe82ac923286da26040cd1aea23359aa Mon Sep 17 00:00:00 2001 -From: "Paul E. McKenney" -Date: Mon, 4 Nov 2013 13:21:10 -0800 -Subject: [PATCH 161/328] rcu: Eliminate softirq processing from rcutree - -Running RCU out of softirq is a problem for some workloads that would -like to manage RCU core processing independently of other softirq work, -for example, setting kthread priority. This commit therefore moves the -RCU core work from softirq to a per-CPU/per-flavor SCHED_OTHER kthread -named rcuc. The SCHED_OTHER approach avoids the scalability problems -that appeared with the earlier attempt to move RCU core processing to -from softirq to kthreads. That said, kernels built with RCU_BOOST=y -will run the rcuc kthreads at the RCU-boosting priority. - -Reported-by: Thomas Gleixner -Tested-by: Mike Galbraith -Signed-off-by: Paul E. McKenney -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/tree.c | 114 ++++++++++++++++++++++++++++--- - kernel/rcu/tree.h | 4 +- - kernel/rcu/tree_plugin.h | 142 +++------------------------------------ - 3 files changed, 115 insertions(+), 145 deletions(-) - -diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index 1a40e3d44cb8..ae716ca783bc 100644 ---- a/kernel/rcu/tree.c -+++ b/kernel/rcu/tree.c -@@ -61,6 +61,13 @@ - #include - #include - #include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "../time/tick-internal.h" - - #include "tree.h" - #include "rcu.h" -@@ -2896,18 +2903,17 @@ __rcu_process_callbacks(struct rcu_state *rsp) - /* - * Do RCU core processing for the current CPU. - */ --static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) -+static __latent_entropy void rcu_process_callbacks(void) - { - struct rcu_state *rsp; - - if (cpu_is_offline(smp_processor_id())) - return; -- trace_rcu_utilization(TPS("Start RCU core")); - for_each_rcu_flavor(rsp) - __rcu_process_callbacks(rsp); -- trace_rcu_utilization(TPS("End RCU core")); - } - -+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); - /* - * Schedule RCU callback invocation. If the specified type of RCU - * does not support RCU priority boosting, just do a direct call, -@@ -2919,18 +2925,105 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) - { - if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) - return; -- if (likely(!rsp->boost)) { -- rcu_do_batch(rsp, rdp); -- return; -- } -- invoke_rcu_callbacks_kthread(); -+ rcu_do_batch(rsp, rdp); -+} -+ -+static void rcu_wake_cond(struct task_struct *t, int status) -+{ -+ /* -+ * If the thread is yielding, only wake it when this -+ * is invoked from idle -+ */ -+ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) -+ wake_up_process(t); - } - -+/* -+ * Wake up this CPU's rcuc kthread to do RCU core processing. 
-+ */ - static void invoke_rcu_core(void) - { -- if (cpu_online(smp_processor_id())) -- raise_softirq(RCU_SOFTIRQ); -+ unsigned long flags; -+ struct task_struct *t; -+ -+ if (!cpu_online(smp_processor_id())) -+ return; -+ local_irq_save(flags); -+ __this_cpu_write(rcu_cpu_has_work, 1); -+ t = __this_cpu_read(rcu_cpu_kthread_task); -+ if (t != NULL && current != t) -+ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); -+ local_irq_restore(flags); -+} -+ -+static void rcu_cpu_kthread_park(unsigned int cpu) -+{ -+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; -+} -+ -+static int rcu_cpu_kthread_should_run(unsigned int cpu) -+{ -+ return __this_cpu_read(rcu_cpu_has_work); -+} -+ -+/* -+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the -+ * RCU softirq used in flavors and configurations of RCU that do not -+ * support RCU priority boosting. -+ */ -+static void rcu_cpu_kthread(unsigned int cpu) -+{ -+ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); -+ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); -+ int spincnt; -+ -+ for (spincnt = 0; spincnt < 10; spincnt++) { -+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); -+ local_bh_disable(); -+ *statusp = RCU_KTHREAD_RUNNING; -+ this_cpu_inc(rcu_cpu_kthread_loops); -+ local_irq_disable(); -+ work = *workp; -+ *workp = 0; -+ local_irq_enable(); -+ if (work) -+ rcu_process_callbacks(); -+ local_bh_enable(); -+ if (*workp == 0) { -+ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); -+ *statusp = RCU_KTHREAD_WAITING; -+ return; -+ } -+ } -+ *statusp = RCU_KTHREAD_YIELDING; -+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); -+ schedule_timeout_interruptible(2); -+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); -+ *statusp = RCU_KTHREAD_WAITING; -+} -+ -+static struct smp_hotplug_thread rcu_cpu_thread_spec = { -+ .store = &rcu_cpu_kthread_task, -+ .thread_should_run = rcu_cpu_kthread_should_run, -+ .thread_fn = rcu_cpu_kthread, -+ .thread_comm = "rcuc/%u", -+ .setup = rcu_cpu_kthread_setup, -+ .park = rcu_cpu_kthread_park, -+}; -+ -+/* -+ * Spawn per-CPU RCU core processing kthreads. -+ */ -+static int __init rcu_spawn_core_kthreads(void) -+{ -+ int cpu; -+ -+ for_each_possible_cpu(cpu) -+ per_cpu(rcu_cpu_has_work, cpu) = 0; -+ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); -+ return 0; - } -+early_initcall(rcu_spawn_core_kthreads); - - /* - * Handle any core-RCU processing required by a call_rcu() invocation. 
-@@ -4196,7 +4289,6 @@ void __init rcu_init(void) - if (dump_tree) - rcu_dump_rcu_node_tree(&rcu_sched_state); - __rcu_init_preempt(); -- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); - - /* - * We don't need protection against CPU-hotplug here because -diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h -index fbbff7c21148..98257d20feb2 100644 ---- a/kernel/rcu/tree.h -+++ b/kernel/rcu/tree.h -@@ -423,12 +423,10 @@ extern struct rcu_state rcu_preempt_state; - - int rcu_dynticks_snap(struct rcu_dynticks *rdtp); - --#ifdef CONFIG_RCU_BOOST - DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); - DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); - DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); - DECLARE_PER_CPU(char, rcu_cpu_has_work); --#endif /* #ifdef CONFIG_RCU_BOOST */ - - #ifndef RCU_TREE_NONCORE - -@@ -451,8 +449,8 @@ static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, - int ncheck); - static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); - static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); --static void invoke_rcu_callbacks_kthread(void); - static bool rcu_is_callbacks_kthread(void); -+static void rcu_cpu_kthread_setup(unsigned int cpu); - #ifdef CONFIG_RCU_BOOST - static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, - struct rcu_node *rnp); -diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h -index 938e64c69d18..56639c8ed550 100644 ---- a/kernel/rcu/tree_plugin.h -+++ b/kernel/rcu/tree_plugin.h -@@ -24,42 +24,16 @@ - * Paul E. McKenney - */ - --#include --#include --#include --#include --#include --#include --#include --#include --#include "../time/tick-internal.h" -- --#ifdef CONFIG_RCU_BOOST -- - #include "../locking/rtmutex_common.h" - - /* - * Control variables for per-CPU and per-rcu_node kthreads. These - * handle all flavors of RCU. - */ --static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); - DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); - DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); - DEFINE_PER_CPU(char, rcu_cpu_has_work); - --#else /* #ifdef CONFIG_RCU_BOOST */ -- --/* -- * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST, -- * all uses are in dead code. Provide a definition to keep the compiler -- * happy, but add WARN_ON_ONCE() to complain if used in the wrong place. -- * This probably needs to be excluded from -rt builds. -- */ --#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; }) --#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1) -- --#endif /* #else #ifdef CONFIG_RCU_BOOST */ -- - #ifdef CONFIG_RCU_NOCB_CPU - static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ - static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ -@@ -1028,18 +1002,21 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) - - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - -+/* -+ * If boosting, set rcuc kthreads to realtime priority. 
-+ */ -+static void rcu_cpu_kthread_setup(unsigned int cpu) -+{ - #ifdef CONFIG_RCU_BOOST -+ struct sched_param sp; - --static void rcu_wake_cond(struct task_struct *t, int status) --{ -- /* -- * If the thread is yielding, only wake it when this -- * is invoked from idle -- */ -- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) -- wake_up_process(t); -+ sp.sched_priority = kthread_prio; -+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); -+#endif /* #ifdef CONFIG_RCU_BOOST */ - } - -+#ifdef CONFIG_RCU_BOOST -+ - /* - * Carry out RCU priority boosting on the task indicated by ->exp_tasks - * or ->boost_tasks, advancing the pointer to the next task in the -@@ -1177,23 +1154,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) - } - } - --/* -- * Wake up the per-CPU kthread to invoke RCU callbacks. -- */ --static void invoke_rcu_callbacks_kthread(void) --{ -- unsigned long flags; -- -- local_irq_save(flags); -- __this_cpu_write(rcu_cpu_has_work, 1); -- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && -- current != __this_cpu_read(rcu_cpu_kthread_task)) { -- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), -- __this_cpu_read(rcu_cpu_kthread_status)); -- } -- local_irq_restore(flags); --} -- - /* - * Is the current CPU running the RCU-callbacks kthread? - * Caller must have preemption disabled. -@@ -1248,67 +1208,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, - return 0; - } - --static void rcu_kthread_do_work(void) --{ -- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); -- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); -- rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); --} -- --static void rcu_cpu_kthread_setup(unsigned int cpu) --{ -- struct sched_param sp; -- -- sp.sched_priority = kthread_prio; -- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); --} -- --static void rcu_cpu_kthread_park(unsigned int cpu) --{ -- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; --} -- --static int rcu_cpu_kthread_should_run(unsigned int cpu) --{ -- return __this_cpu_read(rcu_cpu_has_work); --} -- --/* -- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the -- * RCU softirq used in flavors and configurations of RCU that do not -- * support RCU priority boosting. -- */ --static void rcu_cpu_kthread(unsigned int cpu) --{ -- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); -- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); -- int spincnt; -- -- for (spincnt = 0; spincnt < 10; spincnt++) { -- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); -- local_bh_disable(); -- *statusp = RCU_KTHREAD_RUNNING; -- this_cpu_inc(rcu_cpu_kthread_loops); -- local_irq_disable(); -- work = *workp; -- *workp = 0; -- local_irq_enable(); -- if (work) -- rcu_kthread_do_work(); -- local_bh_enable(); -- if (*workp == 0) { -- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); -- *statusp = RCU_KTHREAD_WAITING; -- return; -- } -- } -- *statusp = RCU_KTHREAD_YIELDING; -- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); -- schedule_timeout_interruptible(2); -- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); -- *statusp = RCU_KTHREAD_WAITING; --} -- - /* - * Set the per-rcu_node kthread's affinity to cover all CPUs that are - * served by the rcu_node in question. 
The CPU hotplug lock is still -@@ -1339,26 +1238,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) - free_cpumask_var(cm); - } - --static struct smp_hotplug_thread rcu_cpu_thread_spec = { -- .store = &rcu_cpu_kthread_task, -- .thread_should_run = rcu_cpu_kthread_should_run, -- .thread_fn = rcu_cpu_kthread, -- .thread_comm = "rcuc/%u", -- .setup = rcu_cpu_kthread_setup, -- .park = rcu_cpu_kthread_park, --}; -- - /* - * Spawn boost kthreads -- called as soon as the scheduler is running. - */ - static void __init rcu_spawn_boost_kthreads(void) - { - struct rcu_node *rnp; -- int cpu; -- -- for_each_possible_cpu(cpu) -- per_cpu(rcu_cpu_has_work, cpu) = 0; -- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); - rcu_for_each_leaf_node(rcu_state_p, rnp) - (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); - } -@@ -1381,11 +1266,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - } - --static void invoke_rcu_callbacks_kthread(void) --{ -- WARN_ON_ONCE(1); --} -- - static bool rcu_is_callbacks_kthread(void) - { - return false; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0162-srcu-use-cpu_online-instead-custom-check.patch b/kernel/patches-4.19.x-rt/0162-srcu-use-cpu_online-instead-custom-check.patch deleted file mode 100644 index cd6abf438..000000000 --- a/kernel/patches-4.19.x-rt/0162-srcu-use-cpu_online-instead-custom-check.patch +++ /dev/null @@ -1,95 +0,0 @@ -From 720a2bda656758067e8cf4f33a33a4e3900133e0 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 13 Sep 2017 14:43:41 +0200 -Subject: [PATCH 162/328] srcu: use cpu_online() instead custom check - -The current check via srcu_online is slightly racy because after looking -at srcu_online there could be an interrupt that interrupted us long -enough until the CPU we checked against went offline. -An alternative would be to hold the hotplug rwsem (so the CPUs don't -change their state) and then check based on cpu_online() if we queue it -on a specific CPU or not. queue_work_on() itself can handle if something -is enqueued on an offline CPU but a timer which is enqueued on an offline -CPU won't fire until the CPU is back online. - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/srcutree.c | 22 ++++------------------ - kernel/rcu/tree.c | 4 ---- - 2 files changed, 4 insertions(+), 22 deletions(-) - -diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c -index 1ff17e297f0c..df0375453ba1 100644 ---- a/kernel/rcu/srcutree.c -+++ b/kernel/rcu/srcutree.c -@@ -38,6 +38,7 @@ - #include - #include - #include -+#include - - #include "rcu.h" - #include "rcu_segcblist.h" -@@ -460,21 +461,6 @@ static void srcu_gp_start(struct srcu_struct *sp) - WARN_ON_ONCE(state != SRCU_STATE_SCAN1); - } - --/* -- * Track online CPUs to guide callback workqueue placement. -- */ --DEFINE_PER_CPU(bool, srcu_online); -- --void srcu_online_cpu(unsigned int cpu) --{ -- WRITE_ONCE(per_cpu(srcu_online, cpu), true); --} -- --void srcu_offline_cpu(unsigned int cpu) --{ -- WRITE_ONCE(per_cpu(srcu_online, cpu), false); --} -- - /* - * Place the workqueue handler on the specified CPU if online, otherwise - * just run it whereever. 
This is useful for placing workqueue handlers -@@ -486,12 +472,12 @@ static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, - { - bool ret; - -- preempt_disable(); -- if (READ_ONCE(per_cpu(srcu_online, cpu))) -+ cpus_read_lock(); -+ if (cpu_online(cpu)) - ret = queue_delayed_work_on(cpu, wq, dwork, delay); - else - ret = queue_delayed_work(wq, dwork, delay); -- preempt_enable(); -+ cpus_read_unlock(); - return ret; - } - -diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index ae716ca783bc..f162a4f54b05 100644 ---- a/kernel/rcu/tree.c -+++ b/kernel/rcu/tree.c -@@ -3784,8 +3784,6 @@ int rcutree_online_cpu(unsigned int cpu) - rnp->ffmask |= rdp->grpmask; - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - } -- if (IS_ENABLED(CONFIG_TREE_SRCU)) -- srcu_online_cpu(cpu); - if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) - return 0; /* Too early in boot for scheduler work. */ - sync_sched_exp_online_cleanup(cpu); -@@ -3813,8 +3811,6 @@ int rcutree_offline_cpu(unsigned int cpu) - } - - rcutree_affinity_setting(cpu, cpu); -- if (IS_ENABLED(CONFIG_TREE_SRCU)) -- srcu_offline_cpu(cpu); - return 0; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch b/kernel/patches-4.19.x-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch deleted file mode 100644 index aa3549aa6..000000000 --- a/kernel/patches-4.19.x-rt/0163-srcu-replace-local_irqsave-with-a-locallock.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 35f458b1ba68a97ce57a87ae2cf6947b271abad2 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 12 Oct 2017 18:37:12 +0200 -Subject: [PATCH 163/328] srcu: replace local_irqsave() with a locallock - -There are two instances which disable interrupts in order to become a -stable this_cpu_ptr() pointer. The restore part is coupled with -spin_unlock_irqrestore() which does not work on RT. -Replace the local_irq_save() call with the appropriate local_lock() -version of it. - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/srcutree.c | 14 +++++++++----- - 1 file changed, 9 insertions(+), 5 deletions(-) - -diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c -index df0375453ba1..0f09a1a9e17c 100644 ---- a/kernel/rcu/srcutree.c -+++ b/kernel/rcu/srcutree.c -@@ -39,6 +39,7 @@ - #include - #include - #include -+#include - - #include "rcu.h" - #include "rcu_segcblist.h" -@@ -760,6 +761,8 @@ static void srcu_flip(struct srcu_struct *sp) - * negligible when amoritized over that time period, and the extra latency - * of a needlessly non-expedited grace period is similarly negligible. - */ -+static DEFINE_LOCAL_IRQ_LOCK(sp_llock); -+ - static bool srcu_might_be_idle(struct srcu_struct *sp) - { - unsigned long curseq; -@@ -768,13 +771,13 @@ static bool srcu_might_be_idle(struct srcu_struct *sp) - unsigned long t; - - /* If the local srcu_data structure has callbacks, not idle. */ -- local_irq_save(flags); -+ local_lock_irqsave(sp_llock, flags); - sdp = this_cpu_ptr(sp->sda); - if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { -- local_irq_restore(flags); -+ local_unlock_irqrestore(sp_llock, flags); - return false; /* Callbacks already present, so not idle. */ - } -- local_irq_restore(flags); -+ local_unlock_irqrestore(sp_llock, flags); - - /* - * No local callbacks, so probabalistically probe global state. 
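The srcu changes in this patch lean on the series' local-lock primitive. A rough, self-contained sketch of that pattern, with hypothetical my_* names and the header name assumed from the local-lock patch earlier in the series:

  /* Sketch of the locallock pattern, not from the patch. */
  #include <linux/locallock.h>
  #include <linux/percpu.h>
  #include <linux/types.h>

  struct my_pcpu_stats {
          u64 bytes;
          u64 packets;
  };

  static DEFINE_PER_CPU(struct my_pcpu_stats, my_stats);
  static DEFINE_LOCAL_IRQ_LOCK(my_llock);

  static void my_account(unsigned int len)
  {
          struct my_pcpu_stats *s;
          unsigned long flags;

          local_lock_irqsave(my_llock, flags);
          s = this_cpu_ptr(&my_stats);    /* pointer stays stable under the lock */
          s->bytes   += len;
          s->packets += 1;
          local_unlock_irqrestore(my_llock, flags);
  }

On !RT the lock degrades to local_irq_save()/restore(), so behaviour is unchanged there; on RT it is a per-CPU sleeping lock, which is why the __call_srcu() hunk below can pair it with spin_unlock_rcu_node() instead of an irqrestore variant.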
-@@ -852,7 +855,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, - return; - } - rhp->func = func; -- local_irq_save(flags); -+ local_lock_irqsave(sp_llock, flags); - sdp = this_cpu_ptr(sp->sda); - spin_lock_rcu_node(sdp); - rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); -@@ -868,7 +871,8 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, - sdp->srcu_gp_seq_needed_exp = s; - needexp = true; - } -- spin_unlock_irqrestore_rcu_node(sdp, flags); -+ spin_unlock_rcu_node(sdp); -+ local_unlock_irqrestore(sp_llock, flags); - if (needgp) - srcu_funnel_gp_start(sp, sdp, s, do_norm); - else if (needexp) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/kernel/patches-4.19.x-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch deleted file mode 100644 index 1a10cb916..000000000 --- a/kernel/patches-4.19.x-rt/0164-rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 266bcdc7bf5bb5736cd2b6896166438f40cf815a Mon Sep 17 00:00:00 2001 -From: Julia Cartwright -Date: Wed, 12 Oct 2016 11:21:14 -0500 -Subject: [PATCH 164/328] rcu: enable rcu_normal_after_boot by default for RT - -The forcing of an expedited grace period is an expensive and very -RT-application unfriendly operation, as it forcibly preempts all running -tasks on CPUs which are preventing the gp from expiring. - -By default, as a policy decision, disable the expediting of grace -periods (after boot) on configurations which enable PREEMPT_RT_FULL. - -Suggested-by: Luiz Capitulino -Acked-by: Paul E. McKenney -Signed-off-by: Julia Cartwright -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/update.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c -index 6ffafb1b1584..16d8dba23329 100644 ---- a/kernel/rcu/update.c -+++ b/kernel/rcu/update.c -@@ -68,7 +68,7 @@ extern int rcu_expedited; /* from sysctl */ - module_param(rcu_expedited, int, 0); - extern int rcu_normal; /* from sysctl */ - module_param(rcu_normal, int, 0); --static int rcu_normal_after_boot; -+static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); - module_param(rcu_normal_after_boot, int, 0); - #endif /* #ifndef CONFIG_TINY_RCU */ - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch b/kernel/patches-4.19.x-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch deleted file mode 100644 index b5b9d134d..000000000 --- a/kernel/patches-4.19.x-rt/0165-tty-serial-omap-Make-the-locking-RT-aware.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 3d60ca27280fe12e0b29927dad783e20bcc391df Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 28 Jul 2011 13:32:57 +0200 -Subject: [PATCH 165/328] tty/serial/omap: Make the locking RT aware - -The lock is a sleeping lock and local_irq_save() is not the -optimsation we are looking for. Redo it to make it work on -RT and -non-RT. 
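This patch and the pl011 patch that follows converge on the same console-write locking shape. A condensed sketch of that shape, with a hypothetical my_console_write() around a driver's existing struct uart_port:

  /* Condensed sketch of the "after" shape, not a real driver. */
  #include <linux/kernel.h>
  #include <linux/serial_core.h>
  #include <linux/spinlock.h>

  static void my_console_write(struct uart_port *port, const char *s,
                               unsigned int count)
  {
          unsigned long flags = 0;
          int locked = 1;

          if (port->sysrq || oops_in_progress)
                  locked = spin_trylock_irqsave(&port->lock, flags);
          else
                  spin_lock_irqsave(&port->lock, flags);

          /* ... write out the characters ... */

          if (locked)
                  spin_unlock_irqrestore(&port->lock, flags);
  }

The point is that spin_lock_irqsave() on a sleeping lock is legal on RT (it does not actually disable interrupts there), whereas an explicit local_irq_save() wrapped around a sleeping lock is not.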
- -Signed-off-by: Thomas Gleixner ---- - drivers/tty/serial/omap-serial.c | 12 ++++-------- - 1 file changed, 4 insertions(+), 8 deletions(-) - -diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c -index 6420ae581a80..0f4f41ed9ffa 100644 ---- a/drivers/tty/serial/omap-serial.c -+++ b/drivers/tty/serial/omap-serial.c -@@ -1307,13 +1307,10 @@ serial_omap_console_write(struct console *co, const char *s, - - pm_runtime_get_sync(up->dev); - -- local_irq_save(flags); -- if (up->port.sysrq) -- locked = 0; -- else if (oops_in_progress) -- locked = spin_trylock(&up->port.lock); -+ if (up->port.sysrq || oops_in_progress) -+ locked = spin_trylock_irqsave(&up->port.lock, flags); - else -- spin_lock(&up->port.lock); -+ spin_lock_irqsave(&up->port.lock, flags); - - /* - * First save the IER then disable the interrupts -@@ -1342,8 +1339,7 @@ serial_omap_console_write(struct console *co, const char *s, - pm_runtime_mark_last_busy(up->dev); - pm_runtime_put_autosuspend(up->dev); - if (locked) -- spin_unlock(&up->port.lock); -- local_irq_restore(flags); -+ spin_unlock_irqrestore(&up->port.lock, flags); - } - - static int __init --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch b/kernel/patches-4.19.x-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch deleted file mode 100644 index 7e97a897f..000000000 --- a/kernel/patches-4.19.x-rt/0166-tty-serial-pl011-Make-the-locking-work-on-RT.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 8122f816435704dfd2fdd2ca02b8399ee22d626c Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 8 Jan 2013 21:36:51 +0100 -Subject: [PATCH 166/328] tty/serial/pl011: Make the locking work on RT - -The lock is a sleeping lock and local_irq_save() is not the optimsation -we are looking for. Redo it to make it work on -RT and non-RT. - -Signed-off-by: Thomas Gleixner ---- - drivers/tty/serial/amba-pl011.c | 15 ++++++++++----- - 1 file changed, 10 insertions(+), 5 deletions(-) - -diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c -index af21122dfade..183e8b731d6a 100644 ---- a/drivers/tty/serial/amba-pl011.c -+++ b/drivers/tty/serial/amba-pl011.c -@@ -2214,13 +2214,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) - - clk_enable(uap->clk); - -- local_irq_save(flags); -+ /* -+ * local_irq_save(flags); -+ * -+ * This local_irq_save() is nonsense. If we come in via sysrq -+ * handling then interrupts are already disabled. Aside of -+ * that the port.sysrq check is racy on SMP regardless. 
-+ */ - if (uap->port.sysrq) - locked = 0; - else if (oops_in_progress) -- locked = spin_trylock(&uap->port.lock); -+ locked = spin_trylock_irqsave(&uap->port.lock, flags); - else -- spin_lock(&uap->port.lock); -+ spin_lock_irqsave(&uap->port.lock, flags); - - /* - * First save the CR then disable the interrupts -@@ -2246,8 +2252,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) - pl011_write(old_cr, uap, REG_CR); - - if (locked) -- spin_unlock(&uap->port.lock); -- local_irq_restore(flags); -+ spin_unlock_irqrestore(&uap->port.lock, flags); - - clk_disable(uap->clk); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch b/kernel/patches-4.19.x-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch deleted file mode 100644 index 3a4c30965..000000000 --- a/kernel/patches-4.19.x-rt/0167-tty-serial-pl011-explicitly-initialize-the-flags-var.patch +++ /dev/null @@ -1,44 +0,0 @@ -From f5a9aefa5442ede43dbf4fa85d3673483e866d61 Mon Sep 17 00:00:00 2001 -From: Kurt Kanzenbach -Date: Mon, 24 Sep 2018 10:29:01 +0200 -Subject: [PATCH 167/328] tty: serial: pl011: explicitly initialize the flags - variable -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Silence the following gcc warning: - -drivers/tty/serial/amba-pl011.c: In function ‘pl011_console_write’: -./include/linux/spinlock.h:260:3: warning: ‘flags’ may be used uninitialized in this function [-Wmaybe-uninitialized] - _raw_spin_unlock_irqrestore(lock, flags); \ - ^~~~~~~~~~~~~~~~~~~~~~~~~~~ -drivers/tty/serial/amba-pl011.c:2214:16: note: ‘flags’ was declared here - unsigned long flags; - ^~~~~ - -The code is correct. Thus, initializing flags to zero doesn't change the -behavior and resolves the warning. - -Signed-off-by: Kurt Kanzenbach -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/tty/serial/amba-pl011.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c -index 183e8b731d6a..2cc6b24bc88d 100644 ---- a/drivers/tty/serial/amba-pl011.c -+++ b/drivers/tty/serial/amba-pl011.c -@@ -2209,7 +2209,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) - { - struct uart_amba_port *uap = amba_ports[co->index]; - unsigned int old_cr = 0, new_cr; -- unsigned long flags; -+ unsigned long flags = 0; - int locked = 1; - - clk_enable(uap->clk); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch b/kernel/patches-4.19.x-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch deleted file mode 100644 index 7929d5245..000000000 --- a/kernel/patches-4.19.x-rt/0168-rt-Improve-the-serial-console-PASS_LIMIT.patch +++ /dev/null @@ -1,46 +0,0 @@ -From d8240c961e33c6da86bdbd96a67440a5d944c415 Mon Sep 17 00:00:00 2001 -From: Ingo Molnar -Date: Wed, 14 Dec 2011 13:05:54 +0100 -Subject: [PATCH 168/328] rt: Improve the serial console PASS_LIMIT -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Beyond the warning: - - drivers/tty/serial/8250/8250.c:1613:6: warning: unused variable ‘pass_counter’ [-Wunused-variable] - -the solution of just looping infinitely was ugly - up it to 1 million to -give it a chance to continue in some really ugly situation. 
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- - drivers/tty/serial/8250/8250_core.c | 11 ++++++++++- - 1 file changed, 10 insertions(+), 1 deletion(-) - -diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c -index 69aaee5d7fe1..cf88317a95fc 100644 ---- a/drivers/tty/serial/8250/8250_core.c -+++ b/drivers/tty/serial/8250/8250_core.c -@@ -54,7 +54,16 @@ static struct uart_driver serial8250_reg; - - static unsigned int skip_txen_test; /* force skip of txen test at init time */ - --#define PASS_LIMIT 512 -+/* -+ * On -rt we can have a more delays, and legitimately -+ * so - so don't drop work spuriously and spam the -+ * syslog: -+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define PASS_LIMIT 1000000 -+#else -+# define PASS_LIMIT 512 -+#endif - - #include - /* --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/kernel/patches-4.19.x-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch deleted file mode 100644 index cc9b251f8..000000000 --- a/kernel/patches-4.19.x-rt/0169-tty-serial-8250-don-t-take-the-trylock-during-oops.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 67a72f4debbad523744a56e90d284d809e830f82 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 11 Apr 2016 16:55:02 +0200 -Subject: [PATCH 169/328] tty: serial: 8250: don't take the trylock during oops - -An oops with irqs off (panic() from irqsafe hrtimer like the watchdog -timer) will lead to a lockdep warning on each invocation and as such -never completes. -Therefore we skip the trylock in the oops case. - -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/tty/serial/8250/8250_port.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - -diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c -index aa4de6907f77..6b1d46c1df3b 100644 ---- a/drivers/tty/serial/8250/8250_port.c -+++ b/drivers/tty/serial/8250/8250_port.c -@@ -3239,10 +3239,8 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, - - serial8250_rpm_get(up); - -- if (port->sysrq) -+ if (port->sysrq || oops_in_progress) - locked = 0; -- else if (oops_in_progress) -- locked = spin_trylock_irqsave(&port->lock, flags); - else - spin_lock_irqsave(&port->lock, flags); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch b/kernel/patches-4.19.x-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch deleted file mode 100644 index 1844a901e..000000000 --- a/kernel/patches-4.19.x-rt/0170-locking-percpu-rwsem-Remove-preempt_disable-variants.patch +++ /dev/null @@ -1,225 +0,0 @@ -From fb0e4a895ad79687d4a90cf901ac9ce349f6818c Mon Sep 17 00:00:00 2001 -From: Peter Zijlstra -Date: Wed, 23 Nov 2016 16:29:32 +0100 -Subject: [PATCH 170/328] locking/percpu-rwsem: Remove preempt_disable variants - -Effective revert commit: - - 87709e28dc7c ("fs/locks: Use percpu_down_read_preempt_disable()") - -This is causing major pain for PREEMPT_RT and is only a very small -performance issue for PREEMPT=y. 
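For reference, the plain percpu-rwsem API that the file-locking code goes back to looks like this as a minimal sketch (hypothetical my_* names):

  /* Sketch of the plain API, not from the patch. */
  #include <linux/percpu-rwsem.h>

  static DEFINE_STATIC_PERCPU_RWSEM(my_rwsem);

  static void my_read_side(void)
  {
          percpu_down_read(&my_rwsem);
          /* read-mostly fast path; preemptible on RT after this revert */
          percpu_up_read(&my_rwsem);
  }

  static void my_write_side(void)
  {
          percpu_down_write(&my_rwsem);
          /* rare, heavyweight update */
          percpu_up_write(&my_rwsem);
  }

The read side stays cheap; only the rare writer pays for the per-CPU bookkeeping, and the reader no longer drags a preempt-disabled region across the flc_lock critical sections.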
- -Signed-off-by: Peter Zijlstra (Intel) ---- - fs/locks.c | 32 ++++++++++++++++---------------- - include/linux/percpu-rwsem.h | 24 ++++-------------------- - 2 files changed, 20 insertions(+), 36 deletions(-) - -diff --git a/fs/locks.c b/fs/locks.c -index 28270e74be34..552476d6f6bb 100644 ---- a/fs/locks.c -+++ b/fs/locks.c -@@ -936,7 +936,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) - return -ENOMEM; - } - -- percpu_down_read_preempt_disable(&file_rwsem); -+ percpu_down_read(&file_rwsem); - spin_lock(&ctx->flc_lock); - if (request->fl_flags & FL_ACCESS) - goto find_conflict; -@@ -977,7 +977,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) - - out: - spin_unlock(&ctx->flc_lock); -- percpu_up_read_preempt_enable(&file_rwsem); -+ percpu_up_read(&file_rwsem); - if (new_fl) - locks_free_lock(new_fl); - locks_dispose_list(&dispose); -@@ -1015,7 +1015,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, - new_fl2 = locks_alloc_lock(); - } - -- percpu_down_read_preempt_disable(&file_rwsem); -+ percpu_down_read(&file_rwsem); - spin_lock(&ctx->flc_lock); - /* - * New lock request. Walk all POSIX locks and look for conflicts. If -@@ -1187,7 +1187,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, - } - out: - spin_unlock(&ctx->flc_lock); -- percpu_up_read_preempt_enable(&file_rwsem); -+ percpu_up_read(&file_rwsem); - /* - * Free any unused locks. - */ -@@ -1462,7 +1462,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) - return error; - } - -- percpu_down_read_preempt_disable(&file_rwsem); -+ percpu_down_read(&file_rwsem); - spin_lock(&ctx->flc_lock); - - time_out_leases(inode, &dispose); -@@ -1514,13 +1514,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) - locks_insert_block(fl, new_fl); - trace_break_lease_block(inode, new_fl); - spin_unlock(&ctx->flc_lock); -- percpu_up_read_preempt_enable(&file_rwsem); -+ percpu_up_read(&file_rwsem); - - locks_dispose_list(&dispose); - error = wait_event_interruptible_timeout(new_fl->fl_wait, - !new_fl->fl_next, break_time); - -- percpu_down_read_preempt_disable(&file_rwsem); -+ percpu_down_read(&file_rwsem); - spin_lock(&ctx->flc_lock); - trace_break_lease_unblock(inode, new_fl); - locks_delete_block(new_fl); -@@ -1537,7 +1537,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) - } - out: - spin_unlock(&ctx->flc_lock); -- percpu_up_read_preempt_enable(&file_rwsem); -+ percpu_up_read(&file_rwsem); - locks_dispose_list(&dispose); - locks_free_lock(new_fl); - return error; -@@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp) - - ctx = smp_load_acquire(&inode->i_flctx); - if (ctx && !list_empty_careful(&ctx->flc_lease)) { -- percpu_down_read_preempt_disable(&file_rwsem); -+ percpu_down_read(&file_rwsem); - spin_lock(&ctx->flc_lock); - time_out_leases(inode, &dispose); - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { -@@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp) - break; - } - spin_unlock(&ctx->flc_lock); -- percpu_up_read_preempt_enable(&file_rwsem); -+ percpu_up_read(&file_rwsem); - - locks_dispose_list(&dispose); - } -@@ -1693,7 +1693,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr - return -EINVAL; - } - -- percpu_down_read_preempt_disable(&file_rwsem); -+ percpu_down_read(&file_rwsem); - spin_lock(&ctx->flc_lock); - time_out_leases(inode, &dispose); - error = 
check_conflicting_open(dentry, arg, lease->fl_flags); -@@ -1764,7 +1764,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr - lease->fl_lmops->lm_setup(lease, priv); - out: - spin_unlock(&ctx->flc_lock); -- percpu_up_read_preempt_enable(&file_rwsem); -+ percpu_up_read(&file_rwsem); - locks_dispose_list(&dispose); - if (is_deleg) - inode_unlock(inode); -@@ -1787,7 +1787,7 @@ static int generic_delete_lease(struct file *filp, void *owner) - return error; - } - -- percpu_down_read_preempt_disable(&file_rwsem); -+ percpu_down_read(&file_rwsem); - spin_lock(&ctx->flc_lock); - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { - if (fl->fl_file == filp && -@@ -1800,7 +1800,7 @@ static int generic_delete_lease(struct file *filp, void *owner) - if (victim) - error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); - spin_unlock(&ctx->flc_lock); -- percpu_up_read_preempt_enable(&file_rwsem); -+ percpu_up_read(&file_rwsem); - locks_dispose_list(&dispose); - return error; - } -@@ -2531,13 +2531,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx) - if (list_empty(&ctx->flc_lease)) - return; - -- percpu_down_read_preempt_disable(&file_rwsem); -+ percpu_down_read(&file_rwsem); - spin_lock(&ctx->flc_lock); - list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) - if (filp == fl->fl_file) - lease_modify(fl, F_UNLCK, &dispose); - spin_unlock(&ctx->flc_lock); -- percpu_up_read_preempt_enable(&file_rwsem); -+ percpu_up_read(&file_rwsem); - - locks_dispose_list(&dispose); - } -diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h -index 79b99d653e03..fb44e237316d 100644 ---- a/include/linux/percpu-rwsem.h -+++ b/include/linux/percpu-rwsem.h -@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \ - extern int __percpu_down_read(struct percpu_rw_semaphore *, int); - extern void __percpu_up_read(struct percpu_rw_semaphore *); - --static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem) -+static inline void percpu_down_read(struct percpu_rw_semaphore *sem) - { - might_sleep(); - -@@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore * - __this_cpu_inc(*sem->read_count); - if (unlikely(!rcu_sync_is_idle(&sem->rss))) - __percpu_down_read(sem, false); /* Unconditional memory barrier */ -- barrier(); - /* -- * The barrier() prevents the compiler from -+ * The preempt_enable() prevents the compiler from - * bleeding the critical section out. - */ --} -- --static inline void percpu_down_read(struct percpu_rw_semaphore *sem) --{ -- percpu_down_read_preempt_disable(sem); - preempt_enable(); - } - -@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) - return ret; - } - --static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem) -+static inline void percpu_up_read(struct percpu_rw_semaphore *sem) - { -- /* -- * The barrier() prevents the compiler from -- * bleeding the critical section out. -- */ -- barrier(); -+ preempt_disable(); - /* - * Same as in percpu_down_read(). 
- */ -@@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem - rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); - } - --static inline void percpu_up_read(struct percpu_rw_semaphore *sem) --{ -- preempt_disable(); -- percpu_up_read_preempt_enable(sem); --} -- - extern void percpu_down_write(struct percpu_rw_semaphore *); - extern void percpu_up_write(struct percpu_rw_semaphore *); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch b/kernel/patches-4.19.x-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch deleted file mode 100644 index 2359beee2..000000000 --- a/kernel/patches-4.19.x-rt/0171-mm-Protect-activate_mm-by-preempt_-disable-enable-_r.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 9be3653b65220de706317fe352f73449254b0884 Mon Sep 17 00:00:00 2001 -From: Yong Zhang -Date: Tue, 15 May 2012 13:53:56 +0800 -Subject: [PATCH 171/328] mm: Protect activate_mm() by - preempt_[disable&enable]_rt() - -User preempt_*_rt instead of local_irq_*_rt or otherwise there will be -warning on ARM like below: - -WARNING: at build/linux/kernel/smp.c:459 smp_call_function_many+0x98/0x264() -Modules linked in: -[] (unwind_backtrace+0x0/0xe4) from [] (warn_slowpath_common+0x4c/0x64) -[] (warn_slowpath_common+0x4c/0x64) from [] (warn_slowpath_null+0x18/0x1c) -[] (warn_slowpath_null+0x18/0x1c) from [](smp_call_function_many+0x98/0x264) -[] (smp_call_function_many+0x98/0x264) from [] (smp_call_function+0x44/0x6c) -[] (smp_call_function+0x44/0x6c) from [] (__new_context+0xbc/0x124) -[] (__new_context+0xbc/0x124) from [] (flush_old_exec+0x460/0x5e4) -[] (flush_old_exec+0x460/0x5e4) from [] (load_elf_binary+0x2e0/0x11ac) -[] (load_elf_binary+0x2e0/0x11ac) from [] (search_binary_handler+0x94/0x2a4) -[] (search_binary_handler+0x94/0x2a4) from [] (do_execve+0x254/0x364) -[] (do_execve+0x254/0x364) from [] (sys_execve+0x34/0x54) -[] (sys_execve+0x34/0x54) from [] (ret_fast_syscall+0x0/0x30) ----[ end trace 0000000000000002 ]--- - -The reason is that ARM need irq enabled when doing activate_mm(). -According to mm-protect-activate-switch-mm.patch, actually -preempt_[disable|enable]_rt() is sufficient. 
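Condensed to its core, the exec_mmap() change below follows this shape; the function name is hypothetical and the preempt_*_rt() helpers come from the preempt-variants patch earlier in this series (they compile away on !RT):

  #include <linux/mm_types.h>
  #include <linux/preempt.h>
  #include <linux/sched/task.h>
  #include <asm/mmu_context.h>

  static void my_switch_to_mm(struct task_struct *tsk, struct mm_struct *mm)
  {
          struct mm_struct *active_mm;

          task_lock(tsk);
          preempt_disable_rt();   /* stay on this CPU, interrupts stay enabled */
          active_mm = tsk->active_mm;
          tsk->mm = mm;
          tsk->active_mm = mm;
          activate_mm(active_mm, mm);
          preempt_enable_rt();
          task_unlock(tsk);
  }

Disabling interrupts here would break ARM, whose __new_context() sends IPIs; disabling only preemption across the mm switch is sufficient, as the commit message argues.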
- -Inspired-by: Steven Rostedt -Signed-off-by: Yong Zhang -Cc: Steven Rostedt -Link: http://lkml.kernel.org/r/1337061236-1766-1-git-send-email-yong.zhang0@gmail.com -Signed-off-by: Thomas Gleixner ---- - fs/exec.c | 2 ++ - mm/mmu_context.c | 2 ++ - 2 files changed, 4 insertions(+) - -diff --git a/fs/exec.c b/fs/exec.c -index 561ea64829ec..0d95c6349fb1 100644 ---- a/fs/exec.c -+++ b/fs/exec.c -@@ -1028,12 +1028,14 @@ static int exec_mmap(struct mm_struct *mm) - } - } - task_lock(tsk); -+ preempt_disable_rt(); - active_mm = tsk->active_mm; - tsk->mm = mm; - tsk->active_mm = mm; - activate_mm(active_mm, mm); - tsk->mm->vmacache_seqnum = 0; - vmacache_flush(tsk); -+ preempt_enable_rt(); - task_unlock(tsk); - if (old_mm) { - up_read(&old_mm->mmap_sem); -diff --git a/mm/mmu_context.c b/mm/mmu_context.c -index 3e612ae748e9..d0ccc070979f 100644 ---- a/mm/mmu_context.c -+++ b/mm/mmu_context.c -@@ -25,6 +25,7 @@ void use_mm(struct mm_struct *mm) - struct task_struct *tsk = current; - - task_lock(tsk); -+ preempt_disable_rt(); - active_mm = tsk->active_mm; - if (active_mm != mm) { - mmgrab(mm); -@@ -32,6 +33,7 @@ void use_mm(struct mm_struct *mm) - } - tsk->mm = mm; - switch_mm(active_mm, mm, tsk); -+ preempt_enable_rt(); - task_unlock(tsk); - #ifdef finish_arch_post_lock_switch - finish_arch_post_lock_switch(); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch b/kernel/patches-4.19.x-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch deleted file mode 100644 index 2a737d0ad..000000000 --- a/kernel/patches-4.19.x-rt/0172-fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-ini.patch +++ /dev/null @@ -1,60 +0,0 @@ -From e78e0f017d742dada34166305b5b965bc26ddef0 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 13 Sep 2017 12:32:34 +0200 -Subject: [PATCH 172/328] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD - init - -Commit 3d375d78593c ("mm: update callers to use HASH_ZERO flag") removed -INIT_HLIST_BL_HEAD and uses the ZERO flag instead for the init. However -on RT we have also a spinlock which needs an init call so we can't use -that. - -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/dcache.c | 11 +++++++++++ - 1 file changed, 11 insertions(+) - -diff --git a/fs/dcache.c b/fs/dcache.c -index 6e0022326afe..10225a9135fb 100644 ---- a/fs/dcache.c -+++ b/fs/dcache.c -@@ -3060,6 +3060,8 @@ __setup("dhash_entries=", set_dhash_entries); - - static void __init dcache_init_early(void) - { -+ unsigned int loop; -+ - /* If hashes are distributed across NUMA nodes, defer - * hash allocation until vmalloc space is available. 
- */ -@@ -3076,11 +3078,16 @@ static void __init dcache_init_early(void) - NULL, - 0, - 0); -+ -+ for (loop = 0; loop < (1U << d_hash_shift); loop++) -+ INIT_HLIST_BL_HEAD(dentry_hashtable + loop); -+ - d_hash_shift = 32 - d_hash_shift; - } - - static void __init dcache_init(void) - { -+ unsigned int loop; - /* - * A constructor could be added for stable state like the lists, - * but it is probably not worth it because of the cache nature -@@ -3104,6 +3111,10 @@ static void __init dcache_init(void) - NULL, - 0, - 0); -+ -+ for (loop = 0; loop < (1U << d_hash_shift); loop++) -+ INIT_HLIST_BL_HEAD(dentry_hashtable + loop); -+ - d_hash_shift = 32 - d_hash_shift; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/kernel/patches-4.19.x-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch deleted file mode 100644 index 0f8802cb1..000000000 --- a/kernel/patches-4.19.x-rt/0173-fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch +++ /dev/null @@ -1,98 +0,0 @@ -From 01e7eacd56ecebd6290b2fa1bf421cf2669b3916 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 20 Oct 2017 11:29:53 +0200 -Subject: [PATCH 173/328] fs/dcache: disable preemption on i_dir_seq's write - side - -i_dir_seq is an opencoded seqcounter. Based on the code it looks like we -could have two writers in parallel despite the fact that the d_lock is -held. The problem is that during the write process on RT the preemption -is still enabled and if this process is interrupted by a reader with RT -priority then we lock up. -To avoid that lock up I am disabling the preemption during the update. -The rename of i_dir_seq is here to ensure to catch new write sides in -future. - -Cc: stable-rt@vger.kernel.org -Reported-by: Oleg.Karfich@wago.com -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/dcache.c | 12 +++++++----- - fs/inode.c | 2 +- - include/linux/fs.h | 2 +- - 3 files changed, 9 insertions(+), 7 deletions(-) - -diff --git a/fs/dcache.c b/fs/dcache.c -index 10225a9135fb..dcde8ffe384c 100644 ---- a/fs/dcache.c -+++ b/fs/dcache.c -@@ -2404,9 +2404,10 @@ EXPORT_SYMBOL(d_rehash); - static inline unsigned start_dir_add(struct inode *dir) - { - -+ preempt_disable_rt(); - for (;;) { -- unsigned n = dir->i_dir_seq; -- if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) -+ unsigned n = dir->__i_dir_seq; -+ if (!(n & 1) && cmpxchg(&dir->__i_dir_seq, n, n + 1) == n) - return n; - cpu_relax(); - } -@@ -2414,7 +2415,8 @@ static inline unsigned start_dir_add(struct inode *dir) - - static inline void end_dir_add(struct inode *dir, unsigned n) - { -- smp_store_release(&dir->i_dir_seq, n + 2); -+ smp_store_release(&dir->__i_dir_seq, n + 2); -+ preempt_enable_rt(); - } - - static void d_wait_lookup(struct dentry *dentry) -@@ -2447,7 +2449,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, - - retry: - rcu_read_lock(); -- seq = smp_load_acquire(&parent->d_inode->i_dir_seq); -+ seq = smp_load_acquire(&parent->d_inode->__i_dir_seq); - r_seq = read_seqbegin(&rename_lock); - dentry = __d_lookup_rcu(parent, name, &d_seq); - if (unlikely(dentry)) { -@@ -2475,7 +2477,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, - } - - hlist_bl_lock(b); -- if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { -+ if (unlikely(READ_ONCE(parent->d_inode->__i_dir_seq) != seq)) { - hlist_bl_unlock(b); - rcu_read_unlock(); - goto retry; -diff --git a/fs/inode.c b/fs/inode.c -index 9c50521c9fe4..40114e8b6c7b 100644 ---- a/fs/inode.c -+++ 
b/fs/inode.c -@@ -155,7 +155,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) - inode->i_bdev = NULL; - inode->i_cdev = NULL; - inode->i_link = NULL; -- inode->i_dir_seq = 0; -+ inode->__i_dir_seq = 0; - inode->i_rdev = 0; - inode->dirtied_when = 0; - -diff --git a/include/linux/fs.h b/include/linux/fs.h -index 92420009b9bc..9b2b707e9112 100644 ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -678,7 +678,7 @@ struct inode { - struct block_device *i_bdev; - struct cdev *i_cdev; - char *i_link; -- unsigned i_dir_seq; -+ unsigned __i_dir_seq; - }; - - __u32 i_generation; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch b/kernel/patches-4.19.x-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch deleted file mode 100644 index 56925098d..000000000 --- a/kernel/patches-4.19.x-rt/0174-squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch +++ /dev/null @@ -1,71 +0,0 @@ -From 78a2f867f7f382fe3a1c20d4be00588e54d47242 Mon Sep 17 00:00:00 2001 -From: Julia Cartwright -Date: Mon, 7 May 2018 08:58:57 -0500 -Subject: [PATCH 174/328] squashfs: make use of local lock in multi_cpu - decompressor - -Currently, the squashfs multi_cpu decompressor makes use of -get_cpu_ptr()/put_cpu_ptr(), which unconditionally disable preemption -during decompression. - -Because the workload is distributed across CPUs, all CPUs can observe a -very high wakeup latency, which has been seen to be as much as 8000us. - -Convert this decompressor to make use of a local lock, which will allow -execution of the decompressor with preemption-enabled, but also ensure -concurrent accesses to the percpu compressor data on the local CPU will -be serialized. - -Cc: stable-rt@vger.kernel.org -Reported-by: Alexander Stein -Tested-by: Alexander Stein -Signed-off-by: Julia Cartwright -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/squashfs/decompressor_multi_percpu.c | 16 ++++++++++++---- - 1 file changed, 12 insertions(+), 4 deletions(-) - -diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c -index 23a9c28ad8ea..6a73c4fa88e7 100644 ---- a/fs/squashfs/decompressor_multi_percpu.c -+++ b/fs/squashfs/decompressor_multi_percpu.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - - #include "squashfs_fs.h" - #include "squashfs_fs_sb.h" -@@ -25,6 +26,8 @@ struct squashfs_stream { - void *stream; - }; - -+static DEFINE_LOCAL_IRQ_LOCK(stream_lock); -+ - void *squashfs_decompressor_create(struct squashfs_sb_info *msblk, - void *comp_opts) - { -@@ -79,10 +82,15 @@ int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh, - { - struct squashfs_stream __percpu *percpu = - (struct squashfs_stream __percpu *) msblk->stream; -- struct squashfs_stream *stream = get_cpu_ptr(percpu); -- int res = msblk->decompressor->decompress(msblk, stream->stream, bh, b, -- offset, length, output); -- put_cpu_ptr(stream); -+ struct squashfs_stream *stream; -+ int res; -+ -+ stream = get_locked_ptr(stream_lock, percpu); -+ -+ res = msblk->decompressor->decompress(msblk, stream->stream, bh, b, -+ offset, length, output); -+ -+ put_locked_ptr(stream_lock, stream); - - if (res < 0) - ERROR("%s decompression failed, data probably corrupt\n", --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch b/kernel/patches-4.19.x-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch deleted file mode 100644 index b0f35f596..000000000 --- 
a/kernel/patches-4.19.x-rt/0175-thermal-Defer-thermal-wakups-to-threads.patch +++ /dev/null @@ -1,136 +0,0 @@ -From 472de6450cf587e0ada3ba8fb8e2944c605981aa Mon Sep 17 00:00:00 2001 -From: Daniel Wagner -Date: Tue, 17 Feb 2015 09:37:44 +0100 -Subject: [PATCH 175/328] thermal: Defer thermal wakups to threads - -On RT the spin lock in pkg_temp_thermal_platfrom_thermal_notify will -call schedule while we run in irq context. - -[] dump_stack+0x4e/0x8f -[] __schedule_bug+0xa6/0xb4 -[] __schedule+0x5b4/0x700 -[] schedule+0x2a/0x90 -[] rt_spin_lock_slowlock+0xe5/0x2d0 -[] rt_spin_lock+0x25/0x30 -[] pkg_temp_thermal_platform_thermal_notify+0x45/0x134 [x86_pkg_temp_thermal] -[] ? therm_throt_process+0x1b/0x160 -[] intel_thermal_interrupt+0x211/0x250 -[] smp_thermal_interrupt+0x21/0x40 -[] thermal_interrupt+0x6d/0x80 - -Let's defer the work to a kthread. - -Signed-off-by: Daniel Wagner -[bigeasy: reoder init/denit position. TODO: flush swork on exit] -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/thermal/x86_pkg_temp_thermal.c | 52 ++++++++++++++++++++++++-- - 1 file changed, 49 insertions(+), 3 deletions(-) - -diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c -index 1ef937d799e4..a5991cbb408f 100644 ---- a/drivers/thermal/x86_pkg_temp_thermal.c -+++ b/drivers/thermal/x86_pkg_temp_thermal.c -@@ -29,6 +29,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -329,7 +330,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work) - schedule_delayed_work_on(cpu, work, ms); - } - --static int pkg_thermal_notify(u64 msr_val) -+static void pkg_thermal_notify_work(struct swork_event *event) - { - int cpu = smp_processor_id(); - struct pkg_device *pkgdev; -@@ -348,9 +349,47 @@ static int pkg_thermal_notify(u64 msr_val) - } - - spin_unlock_irqrestore(&pkg_temp_lock, flags); -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+static struct swork_event notify_work; -+ -+static int pkg_thermal_notify_work_init(void) -+{ -+ int err; -+ -+ err = swork_get(); -+ if (err) -+ return err; -+ -+ INIT_SWORK(¬ify_work, pkg_thermal_notify_work); - return 0; - } - -+static void pkg_thermal_notify_work_cleanup(void) -+{ -+ swork_put(); -+} -+ -+static int pkg_thermal_notify(u64 msr_val) -+{ -+ swork_queue(¬ify_work); -+ return 0; -+} -+ -+#else /* !CONFIG_PREEMPT_RT_FULL */ -+ -+static int pkg_thermal_notify_work_init(void) { return 0; } -+ -+static void pkg_thermal_notify_work_cleanup(void) { } -+ -+static int pkg_thermal_notify(u64 msr_val) -+{ -+ pkg_thermal_notify_work(NULL); -+ return 0; -+} -+#endif /* CONFIG_PREEMPT_RT_FULL */ -+ - static int pkg_temp_thermal_device_add(unsigned int cpu) - { - int pkgid = topology_logical_package_id(cpu); -@@ -515,11 +554,16 @@ static int __init pkg_temp_thermal_init(void) - if (!x86_match_cpu(pkg_temp_thermal_ids)) - return -ENODEV; - -+ if (!pkg_thermal_notify_work_init()) -+ return -ENODEV; -+ - max_packages = topology_max_packages(); - packages = kcalloc(max_packages, sizeof(struct pkg_device *), - GFP_KERNEL); -- if (!packages) -- return -ENOMEM; -+ if (!packages) { -+ ret = -ENOMEM; -+ goto err; -+ } - - ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online", - pkg_thermal_cpu_online, pkg_thermal_cpu_offline); -@@ -537,6 +581,7 @@ static int __init pkg_temp_thermal_init(void) - return 0; - - err: -+ pkg_thermal_notify_work_cleanup(); - kfree(packages); - return ret; - } -@@ -550,6 +595,7 @@ static void __exit pkg_temp_thermal_exit(void) - cpuhp_remove_state(pkg_thermal_hp_state); 
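The swork calls used in this patch come from the simple-work-queue patch earlier in the series. A stripped-down sketch of the deferral pattern, with hypothetical my_* names and the header name assumed:

  #include <linux/init.h>
  #include <linux/swork.h>

  static struct swork_event my_event;

  static void my_event_fn(struct swork_event *event)
  {
          /* kthread context: sleeping locks are fine here */
  }

  static int __init my_defer_init(void)
  {
          int err = swork_get();  /* bring up the swork kthread */

          if (err)
                  return err;
          INIT_SWORK(&my_event, my_event_fn);
          return 0;
  }

  /* called from the hard-IRQ notifier on RT instead of doing the work there */
  static void my_notify(void)
  {
          swork_queue(&my_event);
  }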
- debugfs_remove_recursive(debugfs); - kfree(packages); -+ pkg_thermal_notify_work_cleanup(); - } - module_exit(pkg_temp_thermal_exit) - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch b/kernel/patches-4.19.x-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch deleted file mode 100644 index 71822734f..000000000 --- a/kernel/patches-4.19.x-rt/0176-x86-fpu-Disable-preemption-around-local_bh_disable.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 50d08167a197542caf75d7b8885fc7b4cf32f3e5 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 11 Dec 2018 15:10:33 +0100 -Subject: [PATCH 176/328] x86/fpu: Disable preemption around local_bh_disable() - -__fpu__restore_sig() restores the content of the FPU state in the CPUs -and in order to avoid concurency it disbles BH. On !RT it also disables -preemption but on RT we can get preempted in BH. - -Add preempt_disable() while the FPU state is restored. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/kernel/fpu/signal.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c -index d99a8ee9e185..5e0274a94133 100644 ---- a/arch/x86/kernel/fpu/signal.c -+++ b/arch/x86/kernel/fpu/signal.c -@@ -344,10 +344,12 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) - sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); - } - -+ preempt_disable(); - local_bh_disable(); - fpu->initialized = 1; - fpu__restore(fpu); - local_bh_enable(); -+ preempt_enable(); - - return err; - } else { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch b/kernel/patches-4.19.x-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch deleted file mode 100644 index 63a80d8fd..000000000 --- a/kernel/patches-4.19.x-rt/0177-fs-epoll-Do-not-disable-preemption-on-RT.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 55f9cf79b55c65f9e7858b99be1e12635c64a990 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 8 Jul 2011 16:35:35 +0200 -Subject: [PATCH 177/328] fs/epoll: Do not disable preemption on RT - -ep_call_nested() takes a sleeping lock so we can't disable preemption. -The light version is enough since ep_call_nested() doesn't mind beeing -invoked twice on the same CPU. 
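get_cpu_light()/put_cpu_light() are the series' preemptible counterpart to get_cpu()/put_cpu(): on RT they are based on migrate_disable(), so the CPU number stays stable while the section may still sleep. A minimal sketch of the usage pattern shared by this patch and the vmalloc and block-mq patches that follow (hypothetical my_* names):

  static void my_queue_for_cpu(int cpu)
  {
          /* hand off to a per-CPU queue that does its own locking */
  }

  static void my_submit(void)
  {
          int cpu = get_cpu_light();      /* migrate_disable()-based on RT */

          /*
           * 'cpu' stays this task's CPU until put_cpu_light(), yet the
           * section remains preemptible on RT, so callees may take
           * sleeping locks (any spinlock_t under PREEMPT_RT_FULL).
           */
          my_queue_for_cpu(cpu);
          put_cpu_light();
  }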
- -Signed-off-by: Thomas Gleixner ---- - fs/eventpoll.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/fs/eventpoll.c b/fs/eventpoll.c -index 58f48ea0db23..a41120a34e6d 100644 ---- a/fs/eventpoll.c -+++ b/fs/eventpoll.c -@@ -571,12 +571,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests) - - static void ep_poll_safewake(wait_queue_head_t *wq) - { -- int this_cpu = get_cpu(); -+ int this_cpu = get_cpu_light(); - - ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, - ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); - -- put_cpu(); -+ put_cpu_light(); - } - - #else --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch b/kernel/patches-4.19.x-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch deleted file mode 100644 index e453d06bf..000000000 --- a/kernel/patches-4.19.x-rt/0178-mm-vmalloc-Another-preempt-disable-region-which-suck.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 32e65f9533004d40e52f6a101ac651c16392f178 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 12 Jul 2011 11:39:36 +0200 -Subject: [PATCH 178/328] mm/vmalloc: Another preempt disable region which - sucks - -Avoid the preempt disable version of get_cpu_var(). The inner-lock should -provide enough serialisation. - -Signed-off-by: Thomas Gleixner ---- - mm/vmalloc.c | 13 ++++++++----- - 1 file changed, 8 insertions(+), 5 deletions(-) - -diff --git a/mm/vmalloc.c b/mm/vmalloc.c -index d8e877365f9f..9b7cf993cada 100644 ---- a/mm/vmalloc.c -+++ b/mm/vmalloc.c -@@ -852,7 +852,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) - struct vmap_block *vb; - struct vmap_area *va; - unsigned long vb_idx; -- int node, err; -+ int node, err, cpu; - void *vaddr; - - node = numa_node_id(); -@@ -895,11 +895,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) - BUG_ON(err); - radix_tree_preload_end(); - -- vbq = &get_cpu_var(vmap_block_queue); -+ cpu = get_cpu_light(); -+ vbq = this_cpu_ptr(&vmap_block_queue); - spin_lock(&vbq->lock); - list_add_tail_rcu(&vb->free_list, &vbq->free); - spin_unlock(&vbq->lock); -- put_cpu_var(vmap_block_queue); -+ put_cpu_light(); - - return vaddr; - } -@@ -968,6 +969,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) - struct vmap_block *vb; - void *vaddr = NULL; - unsigned int order; -+ int cpu; - - BUG_ON(offset_in_page(size)); - BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); -@@ -982,7 +984,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) - order = get_order(size); - - rcu_read_lock(); -- vbq = &get_cpu_var(vmap_block_queue); -+ cpu = get_cpu_light(); -+ vbq = this_cpu_ptr(&vmap_block_queue); - list_for_each_entry_rcu(vb, &vbq->free, free_list) { - unsigned long pages_off; - -@@ -1005,7 +1008,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) - break; - } - -- put_cpu_var(vmap_block_queue); -+ put_cpu_light(); - rcu_read_unlock(); - - /* Allocate new block if nothing was found */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0179-block-mq-use-cpu_light.patch b/kernel/patches-4.19.x-rt/0179-block-mq-use-cpu_light.patch deleted file mode 100644 index 64f2272d0..000000000 --- a/kernel/patches-4.19.x-rt/0179-block-mq-use-cpu_light.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 736284010de7d42e40ecf1250dfb1f45e5a94d4e Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 9 Apr 2014 10:37:23 +0200 -Subject: [PATCH 179/328] block: mq: use cpu_light() - -there is a might sleep splat 
because get_cpu() disables preemption and -later we grab a lock. As a workaround for this we use get_cpu_light(). - -Signed-off-by: Sebastian Andrzej Siewior ---- - block/blk-mq.h | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/block/blk-mq.h b/block/blk-mq.h -index 5ad9251627f8..5a96c97991b6 100644 ---- a/block/blk-mq.h -+++ b/block/blk-mq.h -@@ -113,12 +113,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, - */ - static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) - { -- return __blk_mq_get_ctx(q, get_cpu()); -+ return __blk_mq_get_ctx(q, get_cpu_light()); - } - - static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) - { -- put_cpu(); -+ put_cpu_light(); - } - - struct blk_mq_alloc_data { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0180-block-mq-do-not-invoke-preempt_disable.patch b/kernel/patches-4.19.x-rt/0180-block-mq-do-not-invoke-preempt_disable.patch deleted file mode 100644 index ca297c050..000000000 --- a/kernel/patches-4.19.x-rt/0180-block-mq-do-not-invoke-preempt_disable.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 76fb1c9c1652d4dec489aa1611a9071f1b42b5b0 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 180/328] block/mq: do not invoke preempt_disable() - -preempt_disable() and get_cpu() don't play well together with the sleeping -locks it tries to allocate later. -It seems to be enough to replace it with get_cpu_light() and migrate_disable(). - -Signed-off-by: Sebastian Andrzej Siewior ---- - block/blk-mq.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - -diff --git a/block/blk-mq.c b/block/blk-mq.c -index 4aa3284874f6..376fb90de054 100644 ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -570,7 +570,7 @@ static void __blk_mq_complete_request(struct request *rq) - return; - } - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) - shared = cpus_share_cache(cpu, ctx->cpu); - -@@ -582,7 +582,7 @@ static void __blk_mq_complete_request(struct request *rq) - } else { - rq->q->softirq_done_fn(rq); - } -- put_cpu(); -+ put_cpu_light(); - } - - static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) -@@ -1371,14 +1371,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, - return; - - if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { -- int cpu = get_cpu(); -+ int cpu = get_cpu_light(); - if (cpumask_test_cpu(cpu, hctx->cpumask)) { - __blk_mq_run_hw_queue(hctx); -- put_cpu(); -+ put_cpu_light(); - return; - } - -- put_cpu(); -+ put_cpu_light(); - } - - kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch b/kernel/patches-4.19.x-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch deleted file mode 100644 index 15cc19563..000000000 --- a/kernel/patches-4.19.x-rt/0181-block-mq-don-t-complete-requests-via-IPI.patch +++ /dev/null @@ -1,118 +0,0 @@ -From eb88fbf4264f3c4776aeeec310568e4bc79ced80 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 29 Jan 2015 15:10:08 +0100 -Subject: [PATCH 181/328] block/mq: don't complete requests via IPI - -The IPI runs in hardirq context and there are sleeping locks. This patch -moves the completion into a workqueue. 
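The mechanics boil down to giving each request a work item and using schedule_work_on() instead of an IPI. A self-contained sketch with hypothetical my_* types and names; only INIT_WORK() and schedule_work_on() are the APIs the hunk below relies on:

  #include <linux/workqueue.h>

  struct my_request {
          struct work_struct work;
          /* ... driver-specific fields ... */
  };

  static void my_finish_request(struct my_request *rq)
  {
          /* completion handling; may take sleeping locks on RT */
  }

  static void my_complete_work(struct work_struct *work)
  {
          struct my_request *rq = container_of(work, struct my_request, work);

          my_finish_request(rq);
  }

  static void my_request_init(struct my_request *rq)
  {
          INIT_WORK(&rq->work, my_complete_work);
  }

  static void my_complete_on(int target_cpu, struct my_request *rq)
  {
          /* instead of smp_call_function_single_async() from hard IRQ */
          schedule_work_on(target_cpu, &rq->work);
  }

The work then runs in kworker context on the target CPU, so the completion handler may take sleeping locks, which is exactly what the hardirq IPI path could not tolerate on RT.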
- -Signed-off-by: Sebastian Andrzej Siewior ---- - block/blk-core.c | 3 +++ - block/blk-mq.c | 23 +++++++++++++++++++++++ - include/linux/blk-mq.h | 2 +- - include/linux/blkdev.h | 3 +++ - 4 files changed, 30 insertions(+), 1 deletion(-) - -diff --git a/block/blk-core.c b/block/blk-core.c -index ea33d6abdcfc..4860cd26cd5a 100644 ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -189,6 +189,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq) - - INIT_LIST_HEAD(&rq->queuelist); - INIT_LIST_HEAD(&rq->timeout_list); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); -+#endif - rq->cpu = -1; - rq->q = q; - rq->__sector = (sector_t) -1; -diff --git a/block/blk-mq.c b/block/blk-mq.c -index 376fb90de054..5808446e4758 100644 ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -320,6 +320,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, - rq->extra_len = 0; - rq->__deadline = 0; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); -+#endif - INIT_LIST_HEAD(&rq->timeout_list); - rq->timeout = 0; - -@@ -547,12 +550,24 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) - } - EXPORT_SYMBOL(blk_mq_end_request); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+void __blk_mq_complete_request_remote_work(struct work_struct *work) -+{ -+ struct request *rq = container_of(work, struct request, work); -+ -+ rq->q->softirq_done_fn(rq); -+} -+ -+#else -+ - static void __blk_mq_complete_request_remote(void *data) - { - struct request *rq = data; - - rq->q->softirq_done_fn(rq); - } -+#endif - - static void __blk_mq_complete_request(struct request *rq) - { -@@ -575,10 +590,18 @@ static void __blk_mq_complete_request(struct request *rq) - shared = cpus_share_cache(cpu, ctx->cpu); - - if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * We could force QUEUE_FLAG_SAME_FORCE then we would not get in -+ * here. But we could try to invoke it one the CPU like this. 
-+ */ -+ schedule_work_on(ctx->cpu, &rq->work); -+#else - rq->csd.func = __blk_mq_complete_request_remote; - rq->csd.info = rq; - rq->csd.flags = 0; - smp_call_function_single_async(ctx->cpu, &rq->csd); -+#endif - } else { - rq->q->softirq_done_fn(rq); - } -diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h -index 2885dce1ad49..8dbb9ecf9993 100644 ---- a/include/linux/blk-mq.h -+++ b/include/linux/blk-mq.h -@@ -256,7 +256,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) - return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; - } - -- -+void __blk_mq_complete_request_remote_work(struct work_struct *work); - int blk_mq_request_started(struct request *rq); - void blk_mq_start_request(struct request *rq); - void blk_mq_end_request(struct request *rq, blk_status_t error); -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 6e67aeb56928..111ab4209797 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -149,6 +149,9 @@ enum mq_rq_state { - */ - struct request { - struct request_queue *q; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct work_struct work; -+#endif - struct blk_mq_ctx *mq_ctx; - - int cpu; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch b/kernel/patches-4.19.x-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch deleted file mode 100644 index 2847ecca8..000000000 --- a/kernel/patches-4.19.x-rt/0182-md-raid5-Make-raid5_percpu-handling-RT-aware.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 283bc898b7a8aeb35a5022147d72342a7b77170c Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 6 Apr 2010 16:51:31 +0200 -Subject: [PATCH 182/328] md: raid5: Make raid5_percpu handling RT aware - -__raid_run_ops() disables preemption with get_cpu() around the access -to the raid5_percpu variables. That causes scheduling while atomic -spews on RT. - -Serialize the access to the percpu data with a lock and keep the code -preemptible. 
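The shape of the fix, reduced to a sketch with hypothetical my_* names: keep the per-CPU scratch area, but guard it with its own spinlock so the section no longer depends on preemption being off.

  #include <linux/init.h>
  #include <linux/percpu.h>
  #include <linux/smp.h>
  #include <linux/spinlock.h>

  struct my_percpu_scratch {
          spinlock_t lock;
          void *buffer;
  };

  static DEFINE_PER_CPU(struct my_percpu_scratch, my_scratch);

  static int __init my_scratch_init(void)
  {
          int cpu;

          for_each_possible_cpu(cpu)
                  spin_lock_init(&per_cpu(my_scratch, cpu).lock);
          return 0;
  }
  early_initcall(my_scratch_init);

  static void my_use_scratch(void)
  {
          struct my_percpu_scratch *s;
          int cpu = get_cpu_light();      /* preemptible on RT */

          s = &per_cpu(my_scratch, cpu);
          spin_lock(&s->lock);            /* a sleeping lock on RT, so this is fine */
          /* ... use s->buffer ... */
          spin_unlock(&s->lock);
          put_cpu_light();
  }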
- -Reported-by: Udo van den Heuvel -Signed-off-by: Thomas Gleixner -Tested-by: Udo van den Heuvel ---- - drivers/md/raid5.c | 8 +++++--- - drivers/md/raid5.h | 1 + - 2 files changed, 6 insertions(+), 3 deletions(-) - -diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c -index 01021382131b..379ce2f57512 100644 ---- a/drivers/md/raid5.c -+++ b/drivers/md/raid5.c -@@ -2069,8 +2069,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) - struct raid5_percpu *percpu; - unsigned long cpu; - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - percpu = per_cpu_ptr(conf->percpu, cpu); -+ spin_lock(&percpu->lock); - if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { - ops_run_biofill(sh); - overlap_clear++; -@@ -2129,7 +2130,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) - if (test_and_clear_bit(R5_Overlap, &dev->flags)) - wake_up(&sh->raid_conf->wait_for_overlap); - } -- put_cpu(); -+ spin_unlock(&percpu->lock); -+ put_cpu_light(); - } - - static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) -@@ -6815,6 +6817,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) - __func__, cpu); - return -ENOMEM; - } -+ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); - return 0; - } - -@@ -6825,7 +6828,6 @@ static int raid5_alloc_percpu(struct r5conf *conf) - conf->percpu = alloc_percpu(struct raid5_percpu); - if (!conf->percpu) - return -ENOMEM; -- - err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); - if (!err) { - conf->scribble_disks = max(conf->raid_disks, -diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h -index 8474c224127b..a3bf907ab2af 100644 ---- a/drivers/md/raid5.h -+++ b/drivers/md/raid5.h -@@ -637,6 +637,7 @@ struct r5conf { - int recovery_disabled; - /* per cpu variables */ - struct raid5_percpu { -+ spinlock_t lock; /* Protection for -RT */ - struct page *spare_page; /* Used when checking P/Q in raid6 */ - struct flex_array *scribble; /* space for constructing buffer - * lists and performing address --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0183-rt-Introduce-cpu_chill.patch b/kernel/patches-4.19.x-rt/0183-rt-Introduce-cpu_chill.patch deleted file mode 100644 index 74ced432d..000000000 --- a/kernel/patches-4.19.x-rt/0183-rt-Introduce-cpu_chill.patch +++ /dev/null @@ -1,112 +0,0 @@ -From e495449b207a0278e1ea08626dbea152b9cfa1ca Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 7 Mar 2012 20:51:03 +0100 -Subject: [PATCH 183/328] rt: Introduce cpu_chill() - -Retry loops on RT might loop forever when the modifying side was -preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill() -defaults to cpu_relax() for non RT. On RT it puts the looping task to -sleep for a tick so the preempted task can make progress. - -Steven Rostedt changed it to use a hrtimer instead of msleep(): -| -|Ulrich Obergfell pointed out that cpu_chill() calls msleep() which is woken -|up by the ksoftirqd running the TIMER softirq. But as the cpu_chill() is -|called from softirq context, it may block the ksoftirqd() from running, in -|which case, it may never wake up the msleep() causing the deadlock. - -+ bigeasy later changed to schedule_hrtimeout() -|If a task calls cpu_chill() and gets woken up by a regular or spurious -|wakeup and has a signal pending, then it exits the sleep loop in -|do_nanosleep() and sets up the restart block. 
If restart->nanosleep.type is -|not TI_NONE then this results in accessing a stale user pointer from a -|previously interrupted syscall and a copy to user based on the stale -|pointer or a BUG() when 'type' is not supported in nanosleep_copyout(). - -+ bigeasy: add PF_NOFREEZE: -| [....] Waiting for /dev to be fully populated... -| ===================================== -| [ BUG: udevd/229 still has locks held! ] -| 3.12.11-rt17 #23 Not tainted -| ------------------------------------- -| 1 lock held by udevd/229: -| #0: (&type->i_mutex_dir_key#2){+.+.+.}, at: lookup_slow+0x28/0x98 -| -| stack backtrace: -| CPU: 0 PID: 229 Comm: udevd Not tainted 3.12.11-rt17 #23 -| (unwind_backtrace+0x0/0xf8) from (show_stack+0x10/0x14) -| (show_stack+0x10/0x14) from (dump_stack+0x74/0xbc) -| (dump_stack+0x74/0xbc) from (do_nanosleep+0x120/0x160) -| (do_nanosleep+0x120/0x160) from (hrtimer_nanosleep+0x90/0x110) -| (hrtimer_nanosleep+0x90/0x110) from (cpu_chill+0x30/0x38) -| (cpu_chill+0x30/0x38) from (dentry_kill+0x158/0x1ec) -| (dentry_kill+0x158/0x1ec) from (dput+0x74/0x15c) -| (dput+0x74/0x15c) from (lookup_real+0x4c/0x50) -| (lookup_real+0x4c/0x50) from (__lookup_hash+0x34/0x44) -| (__lookup_hash+0x34/0x44) from (lookup_slow+0x38/0x98) -| (lookup_slow+0x38/0x98) from (path_lookupat+0x208/0x7fc) -| (path_lookupat+0x208/0x7fc) from (filename_lookup+0x20/0x60) -| (filename_lookup+0x20/0x60) from (user_path_at_empty+0x50/0x7c) -| (user_path_at_empty+0x50/0x7c) from (user_path_at+0x14/0x1c) -| (user_path_at+0x14/0x1c) from (vfs_fstatat+0x48/0x94) -| (vfs_fstatat+0x48/0x94) from (SyS_stat64+0x14/0x30) -| (SyS_stat64+0x14/0x30) from (ret_fast_syscall+0x0/0x48) - -Signed-off-by: Thomas Gleixner -Signed-off-by: Steven Rostedt -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/delay.h | 6 ++++++ - kernel/time/hrtimer.c | 21 +++++++++++++++++++++ - 2 files changed, 27 insertions(+) - -diff --git a/include/linux/delay.h b/include/linux/delay.h -index b78bab4395d8..7c4bc414a504 100644 ---- a/include/linux/delay.h -+++ b/include/linux/delay.h -@@ -64,4 +64,10 @@ static inline void ssleep(unsigned int seconds) - msleep(seconds * 1000); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+extern void cpu_chill(void); -+#else -+# define cpu_chill() cpu_relax() -+#endif -+ - #endif /* defined(_LINUX_DELAY_H) */ -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index b800efb64238..98a771065818 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -1897,6 +1897,27 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, - } - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * Sleep for 1 ms in hope whoever holds what we want will let it go. 
-+ */ -+void cpu_chill(void) -+{ -+ ktime_t chill_time; -+ unsigned int freeze_flag = current->flags & PF_NOFREEZE; -+ -+ chill_time = ktime_set(0, NSEC_PER_MSEC); -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ current->flags |= PF_NOFREEZE; -+ sleeping_lock_inc(); -+ schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD); -+ sleeping_lock_dec(); -+ if (!freeze_flag) -+ current->flags &= ~PF_NOFREEZE; -+} -+EXPORT_SYMBOL(cpu_chill); -+#endif -+ - /* - * Functions related to boot-time initialization: - */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch b/kernel/patches-4.19.x-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch deleted file mode 100644 index 40e30280b..000000000 --- a/kernel/patches-4.19.x-rt/0184-hrtimer-Don-t-lose-state-in-cpu_chill.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 85650b9854b0d27ec65285b4f60cc63bb100669f Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 19 Feb 2019 16:59:15 +0100 -Subject: [PATCH 184/328] hrtimer: Don't lose state in cpu_chill() - -In cpu_chill() the state is set to TASK_UNINTERRUPTIBLE and a timer is -programmed. On return the state is always TASK_RUNNING which means we -lose the state if it was something other than RUNNING. Also -set_current_state() sets ->task_state_change to within cpu_chill() which -is not expected. - -Save the task state on entry and restore it on return. Simply set the -state in order to avoid updating ->task_state_change. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/time/hrtimer.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 98a771065818..21a454557c8a 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -1905,15 +1905,18 @@ void cpu_chill(void) - { - ktime_t chill_time; - unsigned int freeze_flag = current->flags & PF_NOFREEZE; -+ long saved_state; - -+ saved_state = current->state; - chill_time = ktime_set(0, NSEC_PER_MSEC); -- set_current_state(TASK_UNINTERRUPTIBLE); -+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); - current->flags |= PF_NOFREEZE; - sleeping_lock_inc(); - schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD); - sleeping_lock_dec(); - if (!freeze_flag) - current->flags &= ~PF_NOFREEZE; -+ __set_current_state_no_track(saved_state); - } - EXPORT_SYMBOL(cpu_chill); - #endif --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch b/kernel/patches-4.19.x-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch deleted file mode 100644 index c121ec7ff..000000000 --- a/kernel/patches-4.19.x-rt/0185-hrtimer-cpu_chill-save-task-state-in-saved_state.patch +++ /dev/null @@ -1,62 +0,0 @@ -From c5e6ba548ce7b077f2c912fa506401cccf68d85d Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 26 Feb 2019 12:31:10 +0100 -Subject: [PATCH 185/328] hrtimer: cpu_chill(): save task state in - ->saved_state() - -In the previous change I saved the current task state on stack. This was -bad because while the task is scheduled-out it might receive a wake-up. -The wake up changes the task state and we must not destroy it. - -Save the task-state in ->saved_state under a PI-lock to unsure that -state changes during are not missed while the task temporary scheduled -out. 
- -Reported-by: Mike Galbraith -Tested-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/time/hrtimer.c | 18 +++++++++++++----- - 1 file changed, 13 insertions(+), 5 deletions(-) - -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 21a454557c8a..f16cbc98c47a 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -1903,20 +1903,28 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, - */ - void cpu_chill(void) - { -- ktime_t chill_time; - unsigned int freeze_flag = current->flags & PF_NOFREEZE; -- long saved_state; -+ struct task_struct *self = current; -+ ktime_t chill_time; - -- saved_state = current->state; -- chill_time = ktime_set(0, NSEC_PER_MSEC); -+ raw_spin_lock_irq(&self->pi_lock); -+ self->saved_state = self->state; - __set_current_state_no_track(TASK_UNINTERRUPTIBLE); -+ raw_spin_unlock_irq(&self->pi_lock); -+ -+ chill_time = ktime_set(0, NSEC_PER_MSEC); -+ - current->flags |= PF_NOFREEZE; - sleeping_lock_inc(); - schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD); - sleeping_lock_dec(); - if (!freeze_flag) - current->flags &= ~PF_NOFREEZE; -- __set_current_state_no_track(saved_state); -+ -+ raw_spin_lock_irq(&self->pi_lock); -+ __set_current_state_no_track(self->saved_state); -+ self->saved_state = TASK_RUNNING; -+ raw_spin_unlock_irq(&self->pi_lock); - } - EXPORT_SYMBOL(cpu_chill); - #endif --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/kernel/patches-4.19.x-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch deleted file mode 100644 index 71d1dc862..000000000 --- a/kernel/patches-4.19.x-rt/0186-block-blk-mq-move-blk_queue_usage_counter_release-in.patch +++ /dev/null @@ -1,119 +0,0 @@ -From 57df777639aebaef79d278a5dfa3d749c5884099 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 13 Mar 2018 13:49:16 +0100 -Subject: [PATCH 186/328] block: blk-mq: move blk_queue_usage_counter_release() - into process context - -| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 -| in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6 -| 5 locks held by kworker/u257:6/255: -| #0: ("events_unbound"){.+.+.+}, at: [] process_one_work+0x171/0x5e0 -| #1: ((&entry->work)){+.+.+.}, at: [] process_one_work+0x171/0x5e0 -| #2: (&shost->scan_mutex){+.+.+.}, at: [] __scsi_add_device+0xa3/0x130 [scsi_mod] -| #3: (&set->tag_list_lock){+.+...}, at: [] blk_mq_init_queue+0x96a/0xa50 -| #4: (rcu_read_lock_sched){......}, at: [] percpu_ref_kill_and_confirm+0x1d/0x120 -| Preemption disabled at:[] blk_mq_freeze_queue_start+0x56/0x70 -| -| CPU: 2 PID: 255 Comm: kworker/u257:6 Not tainted 3.18.7-rt0+ #1 -| Workqueue: events_unbound async_run_entry_fn -| 0000000000000003 ffff8800bc29f998 ffffffff815b3a12 0000000000000000 -| 0000000000000000 ffff8800bc29f9b8 ffffffff8109aa16 ffff8800bc29fa28 -| ffff8800bc5d1bc8 ffff8800bc29f9e8 ffffffff815b8dd4 ffff880000000000 -| Call Trace: -| [] dump_stack+0x4f/0x7c -| [] __might_sleep+0x116/0x190 -| [] rt_spin_lock+0x24/0x60 -| [] __wake_up+0x29/0x60 -| [] blk_mq_usage_counter_release+0x1e/0x20 -| [] percpu_ref_kill_and_confirm+0x106/0x120 -| [] blk_mq_freeze_queue_start+0x56/0x70 -| [] blk_mq_update_tag_set_depth+0x40/0xd0 -| [] blk_mq_init_queue+0x98c/0xa50 -| [] scsi_mq_alloc_queue+0x20/0x60 [scsi_mod] -| [] scsi_alloc_sdev+0x2f5/0x370 [scsi_mod] -| [] scsi_probe_and_add_lun+0x9e4/0xdd0 [scsi_mod] -| [] __scsi_add_device+0x126/0x130 [scsi_mod] -| [] 
ata_scsi_scan_host+0xaf/0x200 [libata] -| [] async_port_probe+0x46/0x60 [libata] -| [] async_run_entry_fn+0x3b/0xf0 -| [] process_one_work+0x201/0x5e0 - -percpu_ref_kill_and_confirm() invokes blk_mq_usage_counter_release() in -a rcu-sched region. swait based wake queue can't be used due to -wake_up_all() usage and disabled interrupts in !RT configs (as reported -by Corey Minyard). -The wq_has_sleeper() check has been suggested by Peter Zijlstra. - -Signed-off-by: Sebastian Andrzej Siewior ---- - block/blk-core.c | 14 +++++++++++++- - include/linux/blkdev.h | 2 ++ - 2 files changed, 15 insertions(+), 1 deletion(-) - -diff --git a/block/blk-core.c b/block/blk-core.c -index 4860cd26cd5a..13bf37156bb0 100644 ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -973,12 +973,21 @@ void blk_queue_exit(struct request_queue *q) - percpu_ref_put(&q->q_usage_counter); - } - -+static void blk_queue_usage_counter_release_swork(struct swork_event *sev) -+{ -+ struct request_queue *q = -+ container_of(sev, struct request_queue, mq_pcpu_wake); -+ -+ wake_up_all(&q->mq_freeze_wq); -+} -+ - static void blk_queue_usage_counter_release(struct percpu_ref *ref) - { - struct request_queue *q = - container_of(ref, struct request_queue, q_usage_counter); - -- wake_up_all(&q->mq_freeze_wq); -+ if (wq_has_sleeper(&q->mq_freeze_wq)) -+ swork_queue(&q->mq_pcpu_wake); - } - - static void blk_rq_timed_out_timer(struct timer_list *t) -@@ -1075,6 +1084,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, - queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); - - init_waitqueue_head(&q->mq_freeze_wq); -+ INIT_SWORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_swork); - - /* - * Init percpu_ref in atomic mode so that it's faster to shutdown. -@@ -3964,6 +3974,8 @@ int __init blk_dev_init(void) - if (!kblockd_workqueue) - panic("Failed to create kblockd\n"); - -+ BUG_ON(swork_get()); -+ - request_cachep = kmem_cache_create("blkdev_requests", - sizeof(struct request), 0, SLAB_PANIC, NULL); - -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 111ab4209797..2f3b2e5196eb 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -27,6 +27,7 @@ - #include - #include - #include -+#include - - struct module; - struct scsi_ioctl_command; -@@ -655,6 +656,7 @@ struct request_queue { - #endif - struct rcu_head rcu_head; - wait_queue_head_t mq_freeze_wq; -+ struct swork_event mq_pcpu_wake; - struct percpu_ref q_usage_counter; - struct list_head all_q_node; - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0187-block-Use-cpu_chill-for-retry-loops.patch b/kernel/patches-4.19.x-rt/0187-block-Use-cpu_chill-for-retry-loops.patch deleted file mode 100644 index 234d92d4f..000000000 --- a/kernel/patches-4.19.x-rt/0187-block-Use-cpu_chill-for-retry-loops.patch +++ /dev/null @@ -1,50 +0,0 @@ -From b86763e1cdbb47e8674463b0be6daf0c5c09cabf Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 20 Dec 2012 18:28:26 +0100 -Subject: [PATCH 187/328] block: Use cpu_chill() for retry loops - -Retry loops on RT might loop forever when the modifying side was -preempted. Steven also observed a live lock when there was a -concurrent priority boosting going on. - -Use cpu_chill() instead of cpu_relax() to let the system -make progress. 
- -Signed-off-by: Thomas Gleixner ---- - block/blk-ioc.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/block/blk-ioc.c b/block/blk-ioc.c -index 01580f88fcb3..98d87e52ccdc 100644 ---- a/block/blk-ioc.c -+++ b/block/blk-ioc.c -@@ -9,6 +9,7 @@ - #include - #include - #include -+#include - - #include "blk.h" - -@@ -118,7 +119,7 @@ static void ioc_release_fn(struct work_struct *work) - spin_unlock(q->queue_lock); - } else { - spin_unlock_irqrestore(&ioc->lock, flags); -- cpu_relax(); -+ cpu_chill(); - spin_lock_irqsave_nested(&ioc->lock, flags, 1); - } - } -@@ -202,7 +203,7 @@ void put_io_context_active(struct io_context *ioc) - spin_unlock(icq->q->queue_lock); - } else { - spin_unlock_irqrestore(&ioc->lock, flags); -- cpu_relax(); -+ cpu_chill(); - goto retry; - } - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch b/kernel/patches-4.19.x-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch deleted file mode 100644 index 6b8cb5440..000000000 --- a/kernel/patches-4.19.x-rt/0188-fs-dcache-Use-cpu_chill-in-trylock-loops.patch +++ /dev/null @@ -1,65 +0,0 @@ -From e4c2d46c06d5e1181e6ab26038689d1c112b9d0b Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 7 Mar 2012 21:00:34 +0100 -Subject: [PATCH 188/328] fs: dcache: Use cpu_chill() in trylock loops - -Retry loops on RT might loop forever when the modifying side was -preempted. Use cpu_chill() instead of cpu_relax() to let the system -make progress. - -Signed-off-by: Thomas Gleixner ---- - fs/autofs/expire.c | 3 ++- - fs/namespace.c | 8 ++++++-- - 2 files changed, 8 insertions(+), 3 deletions(-) - -diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c -index 70e9afe589fb..1a6b88ad4fe0 100644 ---- a/fs/autofs/expire.c -+++ b/fs/autofs/expire.c -@@ -8,6 +8,7 @@ - * option, any later version, incorporated herein by reference. - */ - -+#include - #include "autofs_i.h" - - /* Check if a dentry can be expired */ -@@ -153,7 +154,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev, - parent = p->d_parent; - if (!spin_trylock(&parent->d_lock)) { - spin_unlock(&p->d_lock); -- cpu_relax(); -+ cpu_chill(); - goto relock; - } - spin_unlock(&p->d_lock); -diff --git a/fs/namespace.c b/fs/namespace.c -index 1fce41ba3535..5dc970027e30 100644 ---- a/fs/namespace.c -+++ b/fs/namespace.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -327,8 +328,11 @@ int __mnt_want_write(struct vfsmount *m) - * incremented count after it has set MNT_WRITE_HOLD. - */ - smp_mb(); -- while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) -- cpu_relax(); -+ while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { -+ preempt_enable(); -+ cpu_chill(); -+ preempt_disable(); -+ } - /* - * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will - * be set to match its requirements. 
So we must not load that until --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch b/kernel/patches-4.19.x-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch deleted file mode 100644 index b65ea6ef4..000000000 --- a/kernel/patches-4.19.x-rt/0189-net-Use-cpu_chill-instead-of-cpu_relax.patch +++ /dev/null @@ -1,69 +0,0 @@ -From cef4fa5aa7e55b664ae92cd88f7e09b2214c5d7f Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 7 Mar 2012 21:10:04 +0100 -Subject: [PATCH 189/328] net: Use cpu_chill() instead of cpu_relax() - -Retry loops on RT might loop forever when the modifying side was -preempted. Use cpu_chill() instead of cpu_relax() to let the system -make progress. - -Signed-off-by: Thomas Gleixner ---- - net/packet/af_packet.c | 5 +++-- - net/rds/ib_rdma.c | 3 ++- - 2 files changed, 5 insertions(+), 3 deletions(-) - -diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c -index ddf90e6fac51..65befa5ee0c3 100644 ---- a/net/packet/af_packet.c -+++ b/net/packet/af_packet.c -@@ -63,6 +63,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -668,7 +669,7 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t) - if (BLOCK_NUM_PKTS(pbd)) { - while (atomic_read(&pkc->blk_fill_in_prog)) { - /* Waiting for skb_copy_bits to finish... */ -- cpu_relax(); -+ cpu_chill(); - } - } - -@@ -930,7 +931,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, - if (!(status & TP_STATUS_BLK_TMO)) { - while (atomic_read(&pkc->blk_fill_in_prog)) { - /* Waiting for skb_copy_bits to finish... */ -- cpu_relax(); -+ cpu_chill(); - } - } - prb_close_block(pkc, pbd, po, status); -diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c -index 0b347f46b2f4..f395f06031bc 100644 ---- a/net/rds/ib_rdma.c -+++ b/net/rds/ib_rdma.c -@@ -34,6 +34,7 @@ - #include - #include - #include -+#include - - #include "rds_single_path.h" - #include "ib_mr.h" -@@ -222,7 +223,7 @@ static inline void wait_clean_list_grace(void) - for_each_online_cpu(cpu) { - flag = &per_cpu(clean_list_grace, cpu); - while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) -- cpu_relax(); -+ cpu_chill(); - } - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/kernel/patches-4.19.x-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch deleted file mode 100644 index 8dcbe6c54..000000000 --- a/kernel/patches-4.19.x-rt/0190-fs-dcache-use-swait_queue-instead-of-waitqueue.patch +++ /dev/null @@ -1,240 +0,0 @@ -From fc8e48cfb7705d33f133b8b927ec35a1c95af05d Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 14 Sep 2016 14:35:49 +0200 -Subject: [PATCH 190/328] fs/dcache: use swait_queue instead of waitqueue - -__d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock() -which disables preemption. As a workaround convert it to swait. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - fs/cifs/readdir.c | 2 +- - fs/dcache.c | 27 +++++++++++++++------------ - fs/fuse/dir.c | 2 +- - fs/namei.c | 4 ++-- - fs/nfs/dir.c | 4 ++-- - fs/nfs/unlink.c | 4 ++-- - fs/proc/base.c | 2 +- - fs/proc/proc_sysctl.c | 2 +- - include/linux/dcache.h | 4 ++-- - include/linux/nfs_xdr.h | 2 +- - kernel/sched/swait.c | 1 + - 11 files changed, 29 insertions(+), 25 deletions(-) - -diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c -index 3925a7bfc74d..33f7723fb83e 100644 ---- a/fs/cifs/readdir.c -+++ b/fs/cifs/readdir.c -@@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, - struct inode *inode; - struct super_block *sb = parent->d_sb; - struct cifs_sb_info *cifs_sb = CIFS_SB(sb); -- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); -+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); - - cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); - -diff --git a/fs/dcache.c b/fs/dcache.c -index dcde8ffe384c..b2a00f3ff7df 100644 ---- a/fs/dcache.c -+++ b/fs/dcache.c -@@ -2421,21 +2421,24 @@ static inline void end_dir_add(struct inode *dir, unsigned n) - - static void d_wait_lookup(struct dentry *dentry) - { -- if (d_in_lookup(dentry)) { -- DECLARE_WAITQUEUE(wait, current); -- add_wait_queue(dentry->d_wait, &wait); -- do { -- set_current_state(TASK_UNINTERRUPTIBLE); -- spin_unlock(&dentry->d_lock); -- schedule(); -- spin_lock(&dentry->d_lock); -- } while (d_in_lookup(dentry)); -- } -+ struct swait_queue __wait; -+ -+ if (!d_in_lookup(dentry)) -+ return; -+ -+ INIT_LIST_HEAD(&__wait.task_list); -+ do { -+ prepare_to_swait_exclusive(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE); -+ spin_unlock(&dentry->d_lock); -+ schedule(); -+ spin_lock(&dentry->d_lock); -+ } while (d_in_lookup(dentry)); -+ finish_swait(dentry->d_wait, &__wait); - } - - struct dentry *d_alloc_parallel(struct dentry *parent, - const struct qstr *name, -- wait_queue_head_t *wq) -+ struct swait_queue_head *wq) - { - unsigned int hash = name->hash; - struct hlist_bl_head *b = in_lookup_hash(parent, hash); -@@ -2550,7 +2553,7 @@ void __d_lookup_done(struct dentry *dentry) - hlist_bl_lock(b); - dentry->d_flags &= ~DCACHE_PAR_LOOKUP; - __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); -- wake_up_all(dentry->d_wait); -+ swake_up_all(dentry->d_wait); - dentry->d_wait = NULL; - hlist_bl_unlock(b); - INIT_HLIST_NODE(&dentry->d_u.d_alias); -diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c -index 6244345a5745..7ee10b7cc808 100644 ---- a/fs/fuse/dir.c -+++ b/fs/fuse/dir.c -@@ -1213,7 +1213,7 @@ static int fuse_direntplus_link(struct file *file, - struct inode *dir = d_inode(parent); - struct fuse_conn *fc; - struct inode *inode; -- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); -+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); - - if (!o->nodeid) { - /* -diff --git a/fs/namei.c b/fs/namei.c -index c00a7e1da4c0..742e7935f777 100644 ---- a/fs/namei.c -+++ b/fs/namei.c -@@ -1646,7 +1646,7 @@ static struct dentry *__lookup_slow(const struct qstr *name, - { - struct dentry *dentry, *old; - struct inode *inode = dir->d_inode; -- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); -+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); - - /* Don't go there if it's already dead */ - if (unlikely(IS_DEADDIR(inode))) -@@ -3136,7 +3136,7 @@ static int lookup_open(struct nameidata *nd, struct path *path, - struct dentry *dentry; - int error, create_error = 0; - umode_t mode = op->mode; -- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); -+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); - - if (unlikely(IS_DEADDIR(dir_inode))) - return -ENOENT; -diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c 
-index c60b20884c45..7e653c14c7ed 100644 ---- a/fs/nfs/dir.c -+++ b/fs/nfs/dir.c -@@ -457,7 +457,7 @@ static - void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) - { - struct qstr filename = QSTR_INIT(entry->name, entry->len); -- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); -+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); - struct dentry *dentry; - struct dentry *alias; - struct inode *dir = d_inode(parent); -@@ -1516,7 +1516,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, - struct file *file, unsigned open_flags, - umode_t mode) - { -- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); -+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); - struct nfs_open_context *ctx; - struct dentry *res; - struct iattr attr = { .ia_valid = ATTR_OPEN }; -diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c -index ce9100b5604d..839bfa76f41e 100644 ---- a/fs/nfs/unlink.c -+++ b/fs/nfs/unlink.c -@@ -13,7 +13,7 @@ - #include - #include - #include --#include -+#include - #include - #include - -@@ -206,7 +206,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name) - goto out_free_name; - } - data->res.dir_attr = &data->dir_attr; -- init_waitqueue_head(&data->wq); -+ init_swait_queue_head(&data->wq); - - status = -EBUSY; - spin_lock(&dentry->d_lock); -diff --git a/fs/proc/base.c b/fs/proc/base.c -index 3b9b726b1a6c..a45d4d640f01 100644 ---- a/fs/proc/base.c -+++ b/fs/proc/base.c -@@ -1890,7 +1890,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, - - child = d_hash_and_lookup(dir, &qname); - if (!child) { -- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); -+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); - child = d_alloc_parallel(dir, &qname, &wq); - if (IS_ERR(child)) - goto end_instantiate; -diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c -index c95f32b83a94..75f500cb7e74 100644 ---- a/fs/proc/proc_sysctl.c -+++ b/fs/proc/proc_sysctl.c -@@ -681,7 +681,7 @@ static bool proc_sys_fill_cache(struct file *file, - - child = d_lookup(dir, &qname); - if (!child) { -- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); -+ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); - child = d_alloc_parallel(dir, &qname, &wq); - if (IS_ERR(child)) - return false; -diff --git a/include/linux/dcache.h b/include/linux/dcache.h -index 0880baefd85f..8b4d6c8c1f7f 100644 ---- a/include/linux/dcache.h -+++ b/include/linux/dcache.h -@@ -105,7 +105,7 @@ struct dentry { - - union { - struct list_head d_lru; /* LRU list */ -- wait_queue_head_t *d_wait; /* in-lookup ones only */ -+ struct swait_queue_head *d_wait; /* in-lookup ones only */ - }; - struct list_head d_child; /* child of parent list */ - struct list_head d_subdirs; /* our children */ -@@ -236,7 +236,7 @@ extern struct dentry * d_alloc(struct dentry *, const struct qstr *); - extern struct dentry * d_alloc_anon(struct super_block *); - extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); - extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, -- wait_queue_head_t *); -+ struct swait_queue_head *); - extern struct dentry * d_splice_alias(struct inode *, struct dentry *); - extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); - extern struct dentry * d_exact_alias(struct dentry *, struct inode *); -diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h -index cab24a127feb..73b0d19ef0d9 100644 ---- a/include/linux/nfs_xdr.h -+++ b/include/linux/nfs_xdr.h -@@ -1549,7 +1549,7 @@ struct nfs_unlinkdata { - struct nfs_removeargs args; - struct nfs_removeres res; - struct dentry *dentry; -- wait_queue_head_t wq; 
-+ struct swait_queue_head wq; - struct rpc_cred *cred; - struct nfs_fattr dir_attr; - long timeout; -diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c -index c7cb30cdd1b7..119a56d7f739 100644 ---- a/kernel/sched/swait.c -+++ b/kernel/sched/swait.c -@@ -70,6 +70,7 @@ void swake_up_all(struct swait_queue_head *q) - struct swait_queue *curr; - LIST_HEAD(tmp); - -+ WARN_ON(irqs_disabled()); - raw_spin_lock_irq(&q->lock); - list_splice_init(&q->task_list, &tmp); - while (!list_empty(&tmp)) { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0191-workqueue-Use-normal-rcu.patch b/kernel/patches-4.19.x-rt/0191-workqueue-Use-normal-rcu.patch deleted file mode 100644 index 654072350..000000000 --- a/kernel/patches-4.19.x-rt/0191-workqueue-Use-normal-rcu.patch +++ /dev/null @@ -1,359 +0,0 @@ -From 10e5e4ecf5dab22ad4b607ba4222dce12d256c0e Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 24 Jul 2013 15:26:54 +0200 -Subject: [PATCH 191/328] workqueue: Use normal rcu - -There is no need for sched_rcu. The undocumented reason why sched_rcu -is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by -abusing the fact that sched_rcu reader side critical sections are also -protected by preempt or irq disabled regions. - -Signed-off-by: Thomas Gleixner ---- - kernel/workqueue.c | 95 +++++++++++++++++++++++++--------------------- - 1 file changed, 52 insertions(+), 43 deletions(-) - -diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 493908464b9e..544007905706 100644 ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -127,7 +127,7 @@ enum { - * - * PL: wq_pool_mutex protected. - * -- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. -+ * PR: wq_pool_mutex protected for writes. RCU protected for reads. - * - * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. - * -@@ -136,7 +136,7 @@ enum { - * - * WQ: wq->mutex protected. - * -- * WR: wq->mutex protected for writes. Sched-RCU protected for reads. -+ * WR: wq->mutex protected for writes. RCU protected for reads. - * - * MD: wq_mayday_lock protected. - */ -@@ -183,7 +183,7 @@ struct worker_pool { - atomic_t nr_running ____cacheline_aligned_in_smp; - - /* -- * Destruction of pool is sched-RCU protected to allow dereferences -+ * Destruction of pool is RCU protected to allow dereferences - * from get_work_pool(). - */ - struct rcu_head rcu; -@@ -212,7 +212,7 @@ struct pool_workqueue { - /* - * Release of unbound pwq is punted to system_wq. See put_pwq() - * and pwq_unbound_release_workfn() for details. pool_workqueue -- * itself is also sched-RCU protected so that the first pwq can be -+ * itself is also RCU protected so that the first pwq can be - * determined without grabbing wq->mutex. 
- */ - struct work_struct unbound_release_work; -@@ -357,20 +357,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); - #include - - #define assert_rcu_or_pool_mutex() \ -- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&wq_pool_mutex), \ -- "sched RCU or wq_pool_mutex should be held") -+ "RCU or wq_pool_mutex should be held") - - #define assert_rcu_or_wq_mutex(wq) \ -- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&wq->mutex), \ -- "sched RCU or wq->mutex should be held") -+ "RCU or wq->mutex should be held") - - #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ -- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&wq->mutex) && \ - !lockdep_is_held(&wq_pool_mutex), \ -- "sched RCU, wq->mutex or wq_pool_mutex should be held") -+ "RCU, wq->mutex or wq_pool_mutex should be held") - - #define for_each_cpu_worker_pool(pool, cpu) \ - for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ -@@ -382,7 +382,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); - * @pool: iteration cursor - * @pi: integer used for iteration - * -- * This must be called either with wq_pool_mutex held or sched RCU read -+ * This must be called either with wq_pool_mutex held or RCU read - * locked. If the pool needs to be used beyond the locking in effect, the - * caller is responsible for guaranteeing that the pool stays online. - * -@@ -414,7 +414,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); - * @pwq: iteration cursor - * @wq: the target workqueue - * -- * This must be called either with wq->mutex held or sched RCU read locked. -+ * This must be called either with wq->mutex held or RCU read locked. - * If the pwq needs to be used beyond the locking in effect, the caller is - * responsible for guaranteeing that the pwq stays online. - * -@@ -550,7 +550,7 @@ static int worker_pool_assign_id(struct worker_pool *pool) - * @wq: the target workqueue - * @node: the node ID - * -- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU -+ * This must be called with any of wq_pool_mutex, wq->mutex or RCU - * read locked. - * If the pwq needs to be used beyond the locking in effect, the caller is - * responsible for guaranteeing that the pwq stays online. -@@ -694,8 +694,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work) - * @work: the work item of interest - * - * Pools are created and destroyed under wq_pool_mutex, and allows read -- * access under sched-RCU read lock. As such, this function should be -- * called under wq_pool_mutex or with preemption disabled. -+ * access under RCU read lock. As such, this function should be -+ * called under wq_pool_mutex or inside of a rcu_read_lock() region. - * - * All fields of the returned pool are accessible as long as the above - * mentioned locking is in effect. If the returned pool needs to be used -@@ -1100,7 +1100,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) - { - if (pwq) { - /* -- * As both pwqs and pools are sched-RCU protected, the -+ * As both pwqs and pools are RCU protected, the - * following lock operations are safe. 
- */ - spin_lock_irq(&pwq->pool->lock); -@@ -1228,6 +1228,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) - return 0; - -+ rcu_read_lock(); - /* - * The queueing is in progress, or it is already queued. Try to - * steal it from ->worklist without clearing WORK_STRUCT_PENDING. -@@ -1266,10 +1267,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - set_work_pool_and_keep_pending(work, pool->id); - - spin_unlock(&pool->lock); -+ rcu_read_unlock(); - return 1; - } - spin_unlock(&pool->lock); - fail: -+ rcu_read_unlock(); - local_irq_restore(*flags); - if (work_is_canceling(work)) - return -ENOENT; -@@ -1383,6 +1386,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, - if (unlikely(wq->flags & __WQ_DRAINING) && - WARN_ON_ONCE(!is_chained_work(wq))) - return; -+ rcu_read_lock(); - retry: - if (req_cpu == WORK_CPU_UNBOUND) - cpu = wq_select_unbound_cpu(raw_smp_processor_id()); -@@ -1439,10 +1443,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, - /* pwq determined, queue */ - trace_workqueue_queue_work(req_cpu, pwq, work); - -- if (WARN_ON(!list_empty(&work->entry))) { -- spin_unlock(&pwq->pool->lock); -- return; -- } -+ if (WARN_ON(!list_empty(&work->entry))) -+ goto out; - - pwq->nr_in_flight[pwq->work_color]++; - work_flags = work_color_to_flags(pwq->work_color); -@@ -1460,7 +1462,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, - - insert_work(pwq, work, worklist, work_flags); - -+out: - spin_unlock(&pwq->pool->lock); -+ rcu_read_unlock(); - } - - /** -@@ -2861,14 +2865,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, - - might_sleep(); - -- local_irq_disable(); -+ rcu_read_lock(); - pool = get_work_pool(work); - if (!pool) { -- local_irq_enable(); -+ rcu_read_unlock(); - return false; - } - -- spin_lock(&pool->lock); -+ spin_lock_irq(&pool->lock); - /* see the comment in try_to_grab_pending() with the same code */ - pwq = get_work_pwq(work); - if (pwq) { -@@ -2900,10 +2904,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, - lock_map_acquire(&pwq->wq->lockdep_map); - lock_map_release(&pwq->wq->lockdep_map); - } -- -+ rcu_read_unlock(); - return true; - already_gone: - spin_unlock_irq(&pool->lock); -+ rcu_read_unlock(); - return false; - } - -@@ -3350,7 +3355,7 @@ static void rcu_free_pool(struct rcu_head *rcu) - * put_unbound_pool - put a worker_pool - * @pool: worker_pool to put - * -- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU -+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU - * safe manner. get_unbound_pool() calls this function on its failure path - * and this function should be able to release pools which went through, - * successfully or not, init_worker_pool(). 
-@@ -3404,8 +3409,8 @@ static void put_unbound_pool(struct worker_pool *pool) - del_timer_sync(&pool->idle_timer); - del_timer_sync(&pool->mayday_timer); - -- /* sched-RCU protected to allow dereferences from get_work_pool() */ -- call_rcu_sched(&pool->rcu, rcu_free_pool); -+ /* RCU protected to allow dereferences from get_work_pool() */ -+ call_rcu(&pool->rcu, rcu_free_pool); - } - - /** -@@ -3512,14 +3517,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work) - put_unbound_pool(pool); - mutex_unlock(&wq_pool_mutex); - -- call_rcu_sched(&pwq->rcu, rcu_free_pwq); -+ call_rcu(&pwq->rcu, rcu_free_pwq); - - /* - * If we're the last pwq going away, @wq is already dead and no one - * is gonna access it anymore. Schedule RCU free. - */ - if (is_last) -- call_rcu_sched(&wq->rcu, rcu_free_wq); -+ call_rcu(&wq->rcu, rcu_free_wq); - } - - /** -@@ -4219,7 +4224,7 @@ void destroy_workqueue(struct workqueue_struct *wq) - * The base ref is never dropped on per-cpu pwqs. Directly - * schedule RCU free. - */ -- call_rcu_sched(&wq->rcu, rcu_free_wq); -+ call_rcu(&wq->rcu, rcu_free_wq); - } else { - /* - * We're the sole accessor of @wq at this point. Directly -@@ -4329,7 +4334,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) - struct pool_workqueue *pwq; - bool ret; - -- rcu_read_lock_sched(); -+ rcu_read_lock(); -+ preempt_disable(); - - if (cpu == WORK_CPU_UNBOUND) - cpu = smp_processor_id(); -@@ -4340,7 +4346,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) - pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); - - ret = !list_empty(&pwq->delayed_works); -- rcu_read_unlock_sched(); -+ preempt_enable(); -+ rcu_read_unlock(); - - return ret; - } -@@ -4366,15 +4373,15 @@ unsigned int work_busy(struct work_struct *work) - if (work_pending(work)) - ret |= WORK_BUSY_PENDING; - -- local_irq_save(flags); -+ rcu_read_lock(); - pool = get_work_pool(work); - if (pool) { -- spin_lock(&pool->lock); -+ spin_lock_irqsave(&pool->lock, flags); - if (find_worker_executing_work(pool, work)) - ret |= WORK_BUSY_RUNNING; -- spin_unlock(&pool->lock); -+ spin_unlock_irqrestore(&pool->lock, flags); - } -- local_irq_restore(flags); -+ rcu_read_unlock(); - - return ret; - } -@@ -4559,7 +4566,7 @@ void show_workqueue_state(void) - unsigned long flags; - int pi; - -- rcu_read_lock_sched(); -+ rcu_read_lock(); - - pr_info("Showing busy workqueues and worker pools:\n"); - -@@ -4624,7 +4631,7 @@ void show_workqueue_state(void) - touch_nmi_watchdog(); - } - -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); - } - - /* used to show worker information through /proc/PID/{comm,stat,status} */ -@@ -5011,16 +5018,16 @@ bool freeze_workqueues_busy(void) - * nr_active is monotonically decreasing. It's safe - * to peek without lock. 
- */ -- rcu_read_lock_sched(); -+ rcu_read_lock(); - for_each_pwq(pwq, wq) { - WARN_ON_ONCE(pwq->nr_active < 0); - if (pwq->nr_active) { - busy = true; -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); - goto out_unlock; - } - } -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); - } - out_unlock: - mutex_unlock(&wq_pool_mutex); -@@ -5215,7 +5222,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, - const char *delim = ""; - int node, written = 0; - -- rcu_read_lock_sched(); -+ get_online_cpus(); -+ rcu_read_lock(); - for_each_node(node) { - written += scnprintf(buf + written, PAGE_SIZE - written, - "%s%d:%d", delim, node, -@@ -5223,7 +5231,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, - delim = " "; - } - written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); -+ put_online_cpus(); - - return written; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch b/kernel/patches-4.19.x-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch deleted file mode 100644 index cbfdb0cc6..000000000 --- a/kernel/patches-4.19.x-rt/0192-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch +++ /dev/null @@ -1,185 +0,0 @@ -From 9c52df57d5e211043c5a5b6820933f14bdc83525 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 21:42:26 +0200 -Subject: [PATCH 192/328] workqueue: Use local irq lock instead of irq disable - regions - -Use a local_irq_lock as a replacement for irq off regions. We keep the -semantic of irq-off in regard to the pool->lock and remain preemptible. - -Signed-off-by: Thomas Gleixner ---- - kernel/workqueue.c | 45 ++++++++++++++++++++++++++++++--------------- - 1 file changed, 30 insertions(+), 15 deletions(-) - -diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 544007905706..0efb8d25d940 100644 ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -49,6 +49,7 @@ - #include - #include - #include -+#include - - #include "workqueue_internal.h" - -@@ -350,6 +351,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq); - struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; - EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); - -+static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); -+ - static int worker_thread(void *__worker); - static void workqueue_sysfs_unregister(struct workqueue_struct *wq); - -@@ -1103,9 +1106,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) - * As both pwqs and pools are RCU protected, the - * following lock operations are safe. 
- */ -- spin_lock_irq(&pwq->pool->lock); -+ rcu_read_lock(); -+ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); - put_pwq(pwq); -- spin_unlock_irq(&pwq->pool->lock); -+ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); -+ rcu_read_unlock(); - } - } - -@@ -1209,7 +1214,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - struct worker_pool *pool; - struct pool_workqueue *pwq; - -- local_irq_save(*flags); -+ local_lock_irqsave(pendingb_lock, *flags); - - /* try to steal the timer if it exists */ - if (is_dwork) { -@@ -1273,7 +1278,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - spin_unlock(&pool->lock); - fail: - rcu_read_unlock(); -- local_irq_restore(*flags); -+ local_unlock_irqrestore(pendingb_lock, *flags); - if (work_is_canceling(work)) - return -ENOENT; - cpu_relax(); -@@ -1378,7 +1383,13 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, - * queued or lose PENDING. Grabbing PENDING and queueing should - * happen with IRQ disabled. - */ -+#ifndef CONFIG_PREEMPT_RT_FULL -+ /* -+ * nort: On RT the "interrupts-disabled" rule has been replaced with -+ * pendingb_lock. -+ */ - lockdep_assert_irqs_disabled(); -+#endif - - debug_work_activate(work); - -@@ -1484,14 +1495,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, - bool ret = false; - unsigned long flags; - -- local_irq_save(flags); -+ local_lock_irqsave(pendingb_lock,flags); - - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { - __queue_work(cpu, wq, work); - ret = true; - } - -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - return ret; - } - EXPORT_SYMBOL(queue_work_on); -@@ -1500,8 +1511,11 @@ void delayed_work_timer_fn(struct timer_list *t) - { - struct delayed_work *dwork = from_timer(dwork, t, timer); - -+ /* XXX */ -+ /* local_lock(pendingb_lock); */ - /* should have been called from irqsafe timer with irq already off */ - __queue_work(dwork->cpu, dwork->wq, &dwork->work); -+ /* local_unlock(pendingb_lock); */ - } - EXPORT_SYMBOL(delayed_work_timer_fn); - -@@ -1556,14 +1570,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, - unsigned long flags; - - /* read the comment in __queue_work() */ -- local_irq_save(flags); -+ local_lock_irqsave(pendingb_lock, flags); - - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { - __queue_delayed_work(cpu, wq, dwork, delay); - ret = true; - } - -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - return ret; - } - EXPORT_SYMBOL(queue_delayed_work_on); -@@ -1598,7 +1612,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, - - if (likely(ret >= 0)) { - __queue_delayed_work(cpu, wq, dwork, delay); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - } - - /* -ENOENT from try_to_grab_pending() becomes %true */ -@@ -1609,11 +1623,12 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on); - static void rcu_work_rcufn(struct rcu_head *rcu) - { - struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); -+ unsigned long flags; - - /* read the comment in __queue_work() */ -- local_irq_disable(); -+ local_lock_irqsave(pendingb_lock, flags); - __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); -- local_irq_enable(); -+ local_unlock_irqrestore(pendingb_lock, flags); - } - - /** -@@ -3008,7 +3023,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) - - /* tell other tasks trying to grab @work to back off */ - 
mark_work_canceling(work); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - - /* - * This allows canceling during early boot. We know that @work -@@ -3069,10 +3084,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); - */ - bool flush_delayed_work(struct delayed_work *dwork) - { -- local_irq_disable(); -+ local_lock_irq(pendingb_lock); - if (del_timer_sync(&dwork->timer)) - __queue_work(dwork->cpu, dwork->wq, &dwork->work); -- local_irq_enable(); -+ local_unlock_irq(pendingb_lock); - return flush_work(&dwork->work); - } - EXPORT_SYMBOL(flush_delayed_work); -@@ -3110,7 +3125,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork) - return false; - - set_work_pool_and_clear_pending(work, get_work_pool_id(work)); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - return ret; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch b/kernel/patches-4.19.x-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch deleted file mode 100644 index 0e782f22e..000000000 --- a/kernel/patches-4.19.x-rt/0193-workqueue-Prevent-workqueue-versus-ata-piix-livelock.patch +++ /dev/null @@ -1,138 +0,0 @@ -From e86a6ec786f05699d6e5490129ae8e2ede3bbe5e Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Mon, 1 Jul 2013 11:02:42 +0200 -Subject: [PATCH 193/328] workqueue: Prevent workqueue versus ata-piix livelock - -An Intel i7 system regularly detected rcu_preempt stalls after the kernel -was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no -longer possible, unless the system was restarted. - -The kernel message was: -INFO: rcu_preempt self-detected stall on CPU { 6} -[..] -NMI backtrace for cpu 6 -CPU 6 -Pid: 119, comm: irq/19-ata_piix Not tainted 3.8.13-rt13 #11 Shuttle Inc. SX58/SX58 -RIP: 0010:[] [] ip_compute_csum+0x30/0x30 -RSP: 0018:ffff880333303cb0 EFLAGS: 00000002 -RAX: 0000000000000006 RBX: 00000000000003e9 RCX: 0000000000000034 -RDX: 0000000000000000 RSI: ffffffff81aa16d0 RDI: 0000000000000001 -RBP: ffff880333303ce8 R08: ffffffff81aa16d0 R09: ffffffff81c1b8cc -R10: 0000000000000000 R11: 0000000000000000 R12: 000000000005161f -R13: 0000000000000006 R14: ffffffff81aa16d0 R15: 0000000000000002 -FS: 0000000000000000(0000) GS:ffff880333300000(0000) knlGS:0000000000000000 -CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b -CR2: 0000003c1b2bb420 CR3: 0000000001a0f000 CR4: 00000000000007e0 -DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 -DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 -Process irq/19-ata_piix (pid: 119, threadinfo ffff88032d88a000, task ffff88032df80000) -Stack: -ffffffff8124cb32 000000000005161e 00000000000003e9 0000000000001000 -0000000000009022 ffffffff81aa16d0 0000000000000002 ffff880333303cf8 -ffffffff8124caa9 ffff880333303d08 ffffffff8124cad2 ffff880333303d28 -Call Trace: - -[] ? delay_tsc+0x33/0xe3 -[] __delay+0xf/0x11 -[] __const_udelay+0x27/0x29 -[] native_safe_apic_wait_icr_idle+0x39/0x45 -[] __default_send_IPI_dest_field.constprop.0+0x1e/0x58 -[] default_send_IPI_mask_sequence_phys+0x49/0x7d -[] physflat_send_IPI_all+0x17/0x19 -[] arch_trigger_all_cpu_backtrace+0x50/0x79 -[] rcu_check_callbacks+0x1cb/0x568 -[] ? raise_softirq+0x2e/0x35 -[] ? 
tick_sched_do_timer+0x38/0x38 -[] update_process_times+0x44/0x55 -[] tick_sched_handle+0x4a/0x59 -[] tick_sched_timer+0x3c/0x5b -[] __run_hrtimer+0x9b/0x158 -[] hrtimer_interrupt+0x172/0x2aa -[] smp_apic_timer_interrupt+0x76/0x89 -[] apic_timer_interrupt+0x6d/0x80 - -[] ? __local_lock_irqsave+0x17/0x4a -[] try_to_grab_pending+0x42/0x17e -[] mod_delayed_work_on+0x32/0x88 -[] mod_delayed_work+0x1c/0x1e -[] blk_run_queue_async+0x37/0x39 -[] flush_end_io+0xf1/0x107 -[] blk_finish_request+0x21e/0x264 -[] blk_end_bidi_request+0x42/0x60 -[] blk_end_request+0x10/0x12 -[] scsi_io_completion+0x1bf/0x492 -[] ? sd_done+0x298/0x2ef -[] scsi_finish_command+0xe9/0xf2 -[] scsi_softirq_done+0x106/0x10f -[] blk_done_softirq+0x77/0x87 -[] do_current_softirqs+0x172/0x2e1 -[] ? irq_thread_fn+0x3a/0x3a -[] local_bh_enable+0x43/0x72 -[] irq_forced_thread_fn+0x46/0x52 -[] irq_thread+0x8c/0x17c -[] ? irq_thread+0x17c/0x17c -[] ? wake_threads_waitq+0x44/0x44 -[] kthread+0x8d/0x95 -[] ? __kthread_parkme+0x65/0x65 -[] ret_from_fork+0x7c/0xb0 -[] ? __kthread_parkme+0x65/0x65 - -The state of softirqd of this CPU at the time of the crash was: -ksoftirqd/6 R running task 0 53 2 0x00000000 -ffff88032fc39d18 0000000000000046 ffff88033330c4c0 ffff8803303f4710 -ffff88032fc39fd8 ffff88032fc39fd8 0000000000000000 0000000000062500 -ffff88032df88000 ffff8803303f4710 0000000000000000 ffff88032fc38000 -Call Trace: -[] ? __queue_work+0x27c/0x27c -[] preempt_schedule+0x61/0x76 -[] migrate_enable+0xe5/0x1df -[] ? __queue_work+0x27c/0x27c -[] run_timer_softirq+0x161/0x1d6 -[] do_current_softirqs+0x172/0x2e1 -[] run_ksoftirqd+0x2d/0x45 -[] smpboot_thread_fn+0x2ea/0x308 -[] ? test_ti_thread_flag+0xc/0xc -[] ? test_ti_thread_flag+0xc/0xc -[] kthread+0x8d/0x95 -[] ? __kthread_parkme+0x65/0x65 -[] ret_from_fork+0x7c/0xb0 -[] ? __kthread_parkme+0x65/0x65 - -Apparently, the softirq demon and the ata_piix IRQ handler were waiting -for each other to finish ending up in a livelock. After the below patch -was applied, the system no longer crashes. - -Reported-by: Carsten Emde -Proposed-by: Thomas Gleixner -Tested by: Carsten Emde -Signed-off-by: Carsten Emde -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/workqueue.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 0efb8d25d940..34734cdb5cb6 100644 ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -50,6 +50,7 @@ - #include - #include - #include -+#include - - #include "workqueue_internal.h" - -@@ -1281,7 +1282,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - local_unlock_irqrestore(pendingb_lock, *flags); - if (work_is_canceling(work)) - return -ENOENT; -- cpu_relax(); -+ cpu_chill(); - return -EAGAIN; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch b/kernel/patches-4.19.x-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch deleted file mode 100644 index a2719a4ba..000000000 --- a/kernel/patches-4.19.x-rt/0194-sched-Distangle-worker-accounting-from-rqlock.patch +++ /dev/null @@ -1,290 +0,0 @@ -From 297b2be7a99ed420743272591f3bc082f5d0bd6f Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 22 Jun 2011 19:47:03 +0200 -Subject: [PATCH 194/328] sched: Distangle worker accounting from rqlock - -The worker accounting for cpu bound workers is plugged into the core -scheduler code and the wakeup code. 
This is not a hard requirement and -can be avoided by keeping track of the state in the workqueue code -itself. - -Keep track of the sleeping state in the worker itself and call the -notifier before entering the core scheduler. There might be false -positives when the task is woken between that call and actually -scheduling, but that's not really different from scheduling and being -woken immediately after switching away. There is also no harm from -updating nr_running when the task returns from scheduling instead of -accounting it in the wakeup code. - -Signed-off-by: Thomas Gleixner -Cc: Peter Zijlstra -Cc: Tejun Heo -Cc: Jens Axboe -Cc: Linus Torvalds -Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de -Signed-off-by: Thomas Gleixner -[bigeasy: preempt_disable() around wq_worker_sleeping() by Daniel Bristot de - Oliveira] -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/core.c | 90 +++++++++---------------------------- - kernel/workqueue.c | 52 ++++++++++----------- - kernel/workqueue_internal.h | 5 ++- - 3 files changed, 47 insertions(+), 100 deletions(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 3b2664e691de..dcf2deedd3f8 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1704,10 +1704,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl - { - activate_task(rq, p, en_flags); - p->on_rq = TASK_ON_RQ_QUEUED; -- -- /* If a worker is waking up, notify the workqueue: */ -- if (p->flags & PF_WQ_WORKER) -- wq_worker_waking_up(p, cpu_of(rq)); - } - - /* -@@ -2143,56 +2139,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) - return success; - } - --/** -- * try_to_wake_up_local - try to wake up a local task with rq lock held -- * @p: the thread to be awakened -- * @rf: request-queue flags for pinning -- * -- * Put @p on the run-queue if it's not already there. The caller must -- * ensure that this_rq() is locked, @p is bound to this_rq() and not -- * the current task. -- */ --static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf) --{ -- struct rq *rq = task_rq(p); -- -- if (WARN_ON_ONCE(rq != this_rq()) || -- WARN_ON_ONCE(p == current)) -- return; -- -- lockdep_assert_held(&rq->lock); -- -- if (!raw_spin_trylock(&p->pi_lock)) { -- /* -- * This is OK, because current is on_cpu, which avoids it being -- * picked for load-balance and preemption/IRQs are still -- * disabled avoiding further scheduler activity on it and we've -- * not yet picked a replacement task. -- */ -- rq_unlock(rq, rf); -- raw_spin_lock(&p->pi_lock); -- rq_relock(rq, rf); -- } -- -- if (!(p->state & TASK_NORMAL)) -- goto out; -- -- trace_sched_waking(p); -- -- if (!task_on_rq_queued(p)) { -- if (p->in_iowait) { -- delayacct_blkio_end(p); -- atomic_dec(&rq->nr_iowait); -- } -- ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK); -- } -- -- ttwu_do_wakeup(rq, p, 0, rf); -- ttwu_stat(p, smp_processor_id(), 0); --out: -- raw_spin_unlock(&p->pi_lock); --} -- - /** - * wake_up_process - Wake up a specific process - * @p: The process to be woken up. -@@ -3561,21 +3507,6 @@ static void __sched notrace __schedule(bool preempt) - atomic_inc(&rq->nr_iowait); - delayacct_blkio_start(); - } -- -- /* -- * If a worker went to sleep, notify and ask workqueue -- * whether it wants to wake up a task to maintain -- * concurrency. -- * Only call wake up if prev isn't blocked on a sleeping -- * spin lock. 
-- */ -- if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { -- struct task_struct *to_wakeup; -- -- to_wakeup = wq_worker_sleeping(prev); -- if (to_wakeup) -- try_to_wake_up_local(to_wakeup, &rf); -- } - } - switch_count = &prev->nvcsw; - } -@@ -3635,6 +3566,20 @@ static inline void sched_submit_work(struct task_struct *tsk) - { - if (!tsk->state || tsk_is_pi_blocked(tsk)) - return; -+ -+ /* -+ * If a worker went to sleep, notify and ask workqueue whether -+ * it wants to wake up a task to maintain concurrency. -+ * As this function is called inside the schedule() context, -+ * we disable preemption to avoid it calling schedule() again -+ * in the possible wakeup of a kworker. -+ */ -+ if (tsk->flags & PF_WQ_WORKER) { -+ preempt_disable(); -+ wq_worker_sleeping(tsk); -+ preempt_enable_no_resched(); -+ } -+ - /* - * If we are going to sleep and we have plugged IO queued, - * make sure to submit it to avoid deadlocks. -@@ -3643,6 +3588,12 @@ static inline void sched_submit_work(struct task_struct *tsk) - blk_schedule_flush_plug(tsk); - } - -+static void sched_update_worker(struct task_struct *tsk) -+{ -+ if (tsk->flags & PF_WQ_WORKER) -+ wq_worker_running(tsk); -+} -+ - asmlinkage __visible void __sched schedule(void) - { - struct task_struct *tsk = current; -@@ -3653,6 +3604,7 @@ asmlinkage __visible void __sched schedule(void) - __schedule(false); - sched_preempt_enable_no_resched(); - } while (need_resched()); -+ sched_update_worker(tsk); - } - EXPORT_SYMBOL(schedule); - -diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 34734cdb5cb6..045b82ca0eb5 100644 ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -843,43 +843,32 @@ static void wake_up_worker(struct worker_pool *pool) - } - - /** -- * wq_worker_waking_up - a worker is waking up -+ * wq_worker_running - a worker is running again - * @task: task waking up -- * @cpu: CPU @task is waking up to - * -- * This function is called during try_to_wake_up() when a worker is -- * being awoken. -- * -- * CONTEXT: -- * spin_lock_irq(rq->lock) -+ * This function is called when a worker returns from schedule() - */ --void wq_worker_waking_up(struct task_struct *task, int cpu) -+void wq_worker_running(struct task_struct *task) - { - struct worker *worker = kthread_data(task); - -- if (!(worker->flags & WORKER_NOT_RUNNING)) { -- WARN_ON_ONCE(worker->pool->cpu != cpu); -+ if (!worker->sleeping) -+ return; -+ if (!(worker->flags & WORKER_NOT_RUNNING)) - atomic_inc(&worker->pool->nr_running); -- } -+ worker->sleeping = 0; - } - - /** - * wq_worker_sleeping - a worker is going to sleep - * @task: task going to sleep - * -- * This function is called during schedule() when a busy worker is -- * going to sleep. Worker on the same cpu can be woken up by -- * returning pointer to its task. -- * -- * CONTEXT: -- * spin_lock_irq(rq->lock) -- * -- * Return: -- * Worker task on @cpu to wake up, %NULL if none. -+ * This function is called from schedule() when a busy worker is -+ * going to sleep. - */ --struct task_struct *wq_worker_sleeping(struct task_struct *task) -+void wq_worker_sleeping(struct task_struct *task) - { -- struct worker *worker = kthread_data(task), *to_wakeup = NULL; -+ struct worker *next, *worker = kthread_data(task); - struct worker_pool *pool; - - /* -@@ -888,13 +877,15 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task) - * checking NOT_RUNNING. 
- */ - if (worker->flags & WORKER_NOT_RUNNING) -- return NULL; -+ return; - - pool = worker->pool; - -- /* this can only happen on the local cpu */ -- if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id())) -- return NULL; -+ if (WARN_ON_ONCE(worker->sleeping)) -+ return; -+ -+ worker->sleeping = 1; -+ spin_lock_irq(&pool->lock); - - /* - * The counterpart of the following dec_and_test, implied mb, -@@ -908,9 +899,12 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task) - * lock is safe. - */ - if (atomic_dec_and_test(&pool->nr_running) && -- !list_empty(&pool->worklist)) -- to_wakeup = first_idle_worker(pool); -- return to_wakeup ? to_wakeup->task : NULL; -+ !list_empty(&pool->worklist)) { -+ next = first_idle_worker(pool); -+ if (next) -+ wake_up_process(next->task); -+ } -+ spin_unlock_irq(&pool->lock); - } - - /** -diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h -index 66fbb5a9e633..30cfed226b39 100644 ---- a/kernel/workqueue_internal.h -+++ b/kernel/workqueue_internal.h -@@ -44,6 +44,7 @@ struct worker { - unsigned long last_active; /* L: last active timestamp */ - unsigned int flags; /* X: flags */ - int id; /* I: worker id */ -+ int sleeping; /* None */ - - /* - * Opaque string set with work_set_desc(). Printed out with task -@@ -69,7 +70,7 @@ static inline struct worker *current_wq_worker(void) - * Scheduler hooks for concurrency managed workqueue. Only to be used from - * sched/core.c and workqueue.c. - */ --void wq_worker_waking_up(struct task_struct *task, int cpu); --struct task_struct *wq_worker_sleeping(struct task_struct *task); -+void wq_worker_running(struct task_struct *task); -+void wq_worker_sleeping(struct task_struct *task); - - #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0195-debugobjects-Make-RT-aware.patch b/kernel/patches-4.19.x-rt/0195-debugobjects-Make-RT-aware.patch deleted file mode 100644 index be336c18a..000000000 --- a/kernel/patches-4.19.x-rt/0195-debugobjects-Make-RT-aware.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 5240d73278022cb45b35b96e5c56752146bc1b17 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 21:41:35 +0200 -Subject: [PATCH 195/328] debugobjects: Make RT aware - -Avoid filling the pool / allocating memory with irqs off(). - -Signed-off-by: Thomas Gleixner ---- - lib/debugobjects.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - -diff --git a/lib/debugobjects.c b/lib/debugobjects.c -index 14afeeb7d6ef..e28481c402ae 100644 ---- a/lib/debugobjects.c -+++ b/lib/debugobjects.c -@@ -376,7 +376,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) - struct debug_obj *obj; - unsigned long flags; - -- fill_pool(); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (preempt_count() == 0 && !irqs_disabled()) -+#endif -+ fill_pool(); - - db = get_bucket((unsigned long) addr); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0196-seqlock-Prevent-rt-starvation.patch b/kernel/patches-4.19.x-rt/0196-seqlock-Prevent-rt-starvation.patch deleted file mode 100644 index 77e61f57f..000000000 --- a/kernel/patches-4.19.x-rt/0196-seqlock-Prevent-rt-starvation.patch +++ /dev/null @@ -1,194 +0,0 @@ -From 8495d9ef448ef2ad328ed925e41605778ee51cd4 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 22 Feb 2012 12:03:30 +0100 -Subject: [PATCH 196/328] seqlock: Prevent rt starvation - -If a low prio writer gets preempted while holding the seqlock write -locked, a high prio reader spins forever on RT. 
- -To prevent this let the reader grab the spinlock, so it blocks and -eventually boosts the writer. This way the writer can proceed and -endless spinning is prevented. - -For seqcount writers we disable preemption over the update code -path. Thanks to Al Viro for distangling some VFS code to make that -possible. - -Nicholas Mc Guire: -- spin_lock+unlock => spin_unlock_wait -- __write_seqcount_begin => __raw_write_seqcount_begin - -Signed-off-by: Thomas Gleixner ---- - include/linux/seqlock.h | 57 ++++++++++++++++++++++++++++++++--------- - include/net/neighbour.h | 6 ++--- - 2 files changed, 48 insertions(+), 15 deletions(-) - -diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h -index bcf4cf26b8c8..689ed53016c7 100644 ---- a/include/linux/seqlock.h -+++ b/include/linux/seqlock.h -@@ -221,20 +221,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) - return __read_seqcount_retry(s, start); - } - -- -- --static inline void raw_write_seqcount_begin(seqcount_t *s) -+static inline void __raw_write_seqcount_begin(seqcount_t *s) - { - s->sequence++; - smp_wmb(); - } - --static inline void raw_write_seqcount_end(seqcount_t *s) -+static inline void raw_write_seqcount_begin(seqcount_t *s) -+{ -+ preempt_disable_rt(); -+ __raw_write_seqcount_begin(s); -+} -+ -+static inline void __raw_write_seqcount_end(seqcount_t *s) - { - smp_wmb(); - s->sequence++; - } - -+static inline void raw_write_seqcount_end(seqcount_t *s) -+{ -+ __raw_write_seqcount_end(s); -+ preempt_enable_rt(); -+} -+ - /** - * raw_write_seqcount_barrier - do a seq write barrier - * @s: pointer to seqcount_t -@@ -428,10 +438,33 @@ typedef struct { - /* - * Read side functions for starting and finalizing a read side section. - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - static inline unsigned read_seqbegin(const seqlock_t *sl) - { - return read_seqcount_begin(&sl->seqcount); - } -+#else -+/* -+ * Starvation safe read side for RT -+ */ -+static inline unsigned read_seqbegin(seqlock_t *sl) -+{ -+ unsigned ret; -+ -+repeat: -+ ret = READ_ONCE(sl->seqcount.sequence); -+ if (unlikely(ret & 1)) { -+ /* -+ * Take the lock and let the writer proceed (i.e. evtl -+ * boost it), otherwise we could loop here forever. 
-+ */ -+ spin_unlock_wait(&sl->lock); -+ goto repeat; -+ } -+ smp_rmb(); -+ return ret; -+} -+#endif - - static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) - { -@@ -446,36 +479,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) - static inline void write_seqlock(seqlock_t *sl) - { - spin_lock(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } - - static inline void write_sequnlock(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock(&sl->lock); - } - - static inline void write_seqlock_bh(seqlock_t *sl) - { - spin_lock_bh(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } - - static inline void write_sequnlock_bh(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_bh(&sl->lock); - } - - static inline void write_seqlock_irq(seqlock_t *sl) - { - spin_lock_irq(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } - - static inline void write_sequnlock_irq(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_irq(&sl->lock); - } - -@@ -484,7 +517,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) - unsigned long flags; - - spin_lock_irqsave(&sl->lock, flags); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - return flags; - } - -@@ -494,7 +527,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) - static inline void - write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_irqrestore(&sl->lock, flags); - } - -diff --git a/include/net/neighbour.h b/include/net/neighbour.h -index 5ce035984a4d..1166fc17b757 100644 ---- a/include/net/neighbour.h -+++ b/include/net/neighbour.h -@@ -451,7 +451,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) - } - #endif - --static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) -+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) - { - unsigned int hh_alen = 0; - unsigned int seq; -@@ -493,7 +493,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb - - static inline int neigh_output(struct neighbour *n, struct sk_buff *skb) - { -- const struct hh_cache *hh = &n->hh; -+ struct hh_cache *hh = &n->hh; - - if ((n->nud_state & NUD_CONNECTED) && hh->hh_len) - return neigh_hh_output(hh, skb); -@@ -534,7 +534,7 @@ struct neighbour_cb { - - #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) - --static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, -+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, - const struct net_device *dev) - { - unsigned int seq; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/kernel/patches-4.19.x-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch deleted file mode 100644 index d14bc0114..000000000 --- a/kernel/patches-4.19.x-rt/0197-sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 2ca7937e235cfe226669eea396294adeee79c34e Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Wed, 18 Feb 2015 16:05:28 +0100 
-Subject: [PATCH 197/328] sunrpc: Make svc_xprt_do_enqueue() use - get_cpu_light() - -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915 -|in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd -|Preemption disabled at:[] svc_xprt_received+0x4b/0xc0 [sunrpc] -|CPU: 6 PID: 3194 Comm: rpc.nfsd Not tainted 3.18.7-rt1 #9 -|Hardware name: MEDION MS-7848/MS-7848, BIOS M7848W08.404 11/06/2014 -| ffff880409630000 ffff8800d9a33c78 ffffffff815bdeb5 0000000000000002 -| 0000000000000000 ffff8800d9a33c98 ffffffff81073c86 ffff880408dd6008 -| ffff880408dd6000 ffff8800d9a33cb8 ffffffff815c3d84 ffff88040b3ac000 -|Call Trace: -| [] dump_stack+0x4f/0x9e -| [] __might_sleep+0xe6/0x150 -| [] rt_spin_lock+0x24/0x50 -| [] svc_xprt_do_enqueue+0x80/0x230 [sunrpc] -| [] svc_xprt_received+0x4b/0xc0 [sunrpc] -| [] svc_add_new_perm_xprt+0x6d/0x80 [sunrpc] -| [] svc_addsock+0x143/0x200 [sunrpc] -| [] write_ports+0x28c/0x340 [nfsd] -| [] nfsctl_transaction_write+0x4c/0x80 [nfsd] -| [] vfs_write+0xb3/0x1d0 -| [] SyS_write+0x49/0xb0 -| [] system_call_fastpath+0x16/0x1b - - -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - net/sunrpc/svc_xprt.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c -index 6cf0fd37cbf0..48c0a0b90946 100644 ---- a/net/sunrpc/svc_xprt.c -+++ b/net/sunrpc/svc_xprt.c -@@ -393,7 +393,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt) - if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) - return; - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - pool = svc_pool_for_cpu(xprt->xpt_server, cpu); - - atomic_long_inc(&pool->sp_stats.packets); -@@ -417,7 +417,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt) - rqstp = NULL; - out_unlock: - rcu_read_unlock(); -- put_cpu(); -+ put_cpu_light(); - trace_svc_xprt_do_enqueue(xprt, rqstp); - } - EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0198-net-Use-skbufhead-with-raw-lock.patch b/kernel/patches-4.19.x-rt/0198-net-Use-skbufhead-with-raw-lock.patch deleted file mode 100644 index f549f0a3a..000000000 --- a/kernel/patches-4.19.x-rt/0198-net-Use-skbufhead-with-raw-lock.patch +++ /dev/null @@ -1,172 +0,0 @@ -From ac577d42ef159cea2e3512a52c9dbc43de1c7c5d Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 12 Jul 2011 15:38:34 +0200 -Subject: [PATCH 198/328] net: Use skbufhead with raw lock - -Use the rps lock as rawlock so we can keep irq-off regions. It looks low -latency. However we can't kfree() from this context therefore we defer this -to the softirq and use the tofree_queue list for it (similar to process_queue). 
- -Signed-off-by: Thomas Gleixner ---- - include/linux/netdevice.h | 1 + - include/linux/skbuff.h | 7 +++++++ - net/core/dev.c | 33 +++++++++++++++++++++++++-------- - 3 files changed, 33 insertions(+), 8 deletions(-) - -diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h -index 84bbdcbb199a..b816eb0bc1c4 100644 ---- a/include/linux/netdevice.h -+++ b/include/linux/netdevice.h -@@ -2982,6 +2982,7 @@ struct softnet_data { - unsigned int dropped; - struct sk_buff_head input_pkt_queue; - struct napi_struct backlog; -+ struct sk_buff_head tofree_queue; - - }; - -diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h -index 25407c206e73..d4d7aea13cc6 100644 ---- a/include/linux/skbuff.h -+++ b/include/linux/skbuff.h -@@ -287,6 +287,7 @@ struct sk_buff_head { - - __u32 qlen; - spinlock_t lock; -+ raw_spinlock_t raw_lock; - }; - - struct sk_buff; -@@ -1718,6 +1719,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) - __skb_queue_head_init(list); - } - -+static inline void skb_queue_head_init_raw(struct sk_buff_head *list) -+{ -+ raw_spin_lock_init(&list->raw_lock); -+ __skb_queue_head_init(list); -+} -+ - static inline void skb_queue_head_init_class(struct sk_buff_head *list, - struct lock_class_key *class) - { -diff --git a/net/core/dev.c b/net/core/dev.c -index b754adb14205..985d1f703383 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -218,14 +218,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) - static inline void rps_lock(struct softnet_data *sd) - { - #ifdef CONFIG_RPS -- spin_lock(&sd->input_pkt_queue.lock); -+ raw_spin_lock(&sd->input_pkt_queue.raw_lock); - #endif - } - - static inline void rps_unlock(struct softnet_data *sd) - { - #ifdef CONFIG_RPS -- spin_unlock(&sd->input_pkt_queue.lock); -+ raw_spin_unlock(&sd->input_pkt_queue.raw_lock); - #endif - } - -@@ -5246,7 +5246,7 @@ static void flush_backlog(struct work_struct *work) - skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { - if (skb->dev->reg_state == NETREG_UNREGISTERING) { - __skb_unlink(skb, &sd->input_pkt_queue); -- kfree_skb(skb); -+ __skb_queue_tail(&sd->tofree_queue, skb); - input_queue_head_incr(sd); - } - } -@@ -5256,11 +5256,14 @@ static void flush_backlog(struct work_struct *work) - skb_queue_walk_safe(&sd->process_queue, skb, tmp) { - if (skb->dev->reg_state == NETREG_UNREGISTERING) { - __skb_unlink(skb, &sd->process_queue); -- kfree_skb(skb); -+ __skb_queue_tail(&sd->tofree_queue, skb); - input_queue_head_incr(sd); - } - } -+ if (!skb_queue_empty(&sd->tofree_queue)) -+ raise_softirq_irqoff(NET_RX_SOFTIRQ); - local_bh_enable(); -+ - } - - static void flush_all_backlogs(void) -@@ -5839,7 +5842,9 @@ static int process_backlog(struct napi_struct *napi, int quota) - while (again) { - struct sk_buff *skb; - -+ local_irq_disable(); - while ((skb = __skb_dequeue(&sd->process_queue))) { -+ local_irq_enable(); - rcu_read_lock(); - __netif_receive_skb(skb); - rcu_read_unlock(); -@@ -5847,9 +5852,9 @@ static int process_backlog(struct napi_struct *napi, int quota) - if (++work >= quota) - return work; - -+ local_irq_disable(); - } - -- local_irq_disable(); - rps_lock(sd); - if (skb_queue_empty(&sd->input_pkt_queue)) { - /* -@@ -6314,13 +6319,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) - unsigned long time_limit = jiffies + - usecs_to_jiffies(netdev_budget_usecs); - int budget = netdev_budget; -+ struct sk_buff_head tofree_q; -+ struct sk_buff *skb; - LIST_HEAD(list); - LIST_HEAD(repoll); - -+ 
__skb_queue_head_init(&tofree_q); -+ - local_irq_disable(); -+ skb_queue_splice_init(&sd->tofree_queue, &tofree_q); - list_splice_init(&sd->poll_list, &list); - local_irq_enable(); - -+ while ((skb = __skb_dequeue(&tofree_q))) -+ kfree_skb(skb); -+ - for (;;) { - struct napi_struct *n; - -@@ -9504,10 +9517,13 @@ static int dev_cpu_dead(unsigned int oldcpu) - netif_rx_ni(skb); - input_queue_head_incr(oldsd); - } -- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { -+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { - netif_rx_ni(skb); - input_queue_head_incr(oldsd); - } -+ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { -+ kfree_skb(skb); -+ } - - return 0; - } -@@ -9818,8 +9834,9 @@ static int __init net_dev_init(void) - - INIT_WORK(flush, flush_backlog); - -- skb_queue_head_init(&sd->input_pkt_queue); -- skb_queue_head_init(&sd->process_queue); -+ skb_queue_head_init_raw(&sd->input_pkt_queue); -+ skb_queue_head_init_raw(&sd->process_queue); -+ skb_queue_head_init_raw(&sd->tofree_queue); - #ifdef CONFIG_XFRM_OFFLOAD - skb_queue_head_init(&sd->xfrm_backlog); - #endif --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/kernel/patches-4.19.x-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch deleted file mode 100644 index 12449857a..000000000 --- a/kernel/patches-4.19.x-rt/0199-net-move-xmit_recursion-to-per-task-variable-on-RT.patch +++ /dev/null @@ -1,277 +0,0 @@ -From 2b3882e6dc68a87b4b958396528cf0ccf1d9c5b4 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 13 Jan 2016 15:55:02 +0100 -Subject: [PATCH 199/328] net: move xmit_recursion to per-task variable on -RT - -A softirq on -RT can be preempted. That means one task is in -__dev_queue_xmit(), gets preempted and another task may enter -__dev_queue_xmit() aw well. netperf together with a bridge device -will then trigger the `recursion alert` because each task increments -the xmit_recursion variable which is per-CPU. -A virtual device like br0 is required to trigger this warning. - -This patch moves the lock owner and counter to be per task instead per-CPU so -it counts the recursion properly on -RT. The owner is also a task now and not a -CPU number. 
- -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/netdevice.h | 95 ++++++++++++++++++++++++++++++++++++--- - include/linux/sched.h | 3 ++ - net/core/dev.c | 15 ++++--- - net/core/filter.c | 6 +-- - 4 files changed, 104 insertions(+), 15 deletions(-) - -diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h -index b816eb0bc1c4..5de4b66e11fe 100644 ---- a/include/linux/netdevice.h -+++ b/include/linux/netdevice.h -@@ -587,7 +587,11 @@ struct netdev_queue { - * write-mostly part - */ - spinlock_t _xmit_lock ____cacheline_aligned_in_smp; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct task_struct *xmit_lock_owner; -+#else - int xmit_lock_owner; -+#endif - /* - * Time (in jiffies) of last Tx - */ -@@ -2620,14 +2624,53 @@ void netdev_freemem(struct net_device *dev); - void synchronize_net(void); - int init_dummy_netdev(struct net_device *dev); - --DECLARE_PER_CPU(int, xmit_recursion); - #define XMIT_RECURSION_LIMIT 10 -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline int dev_recursion_level(void) -+{ -+ return current->xmit_recursion; -+} -+ -+static inline int xmit_rec_read(void) -+{ -+ return current->xmit_recursion; -+} -+ -+static inline void xmit_rec_inc(void) -+{ -+ current->xmit_recursion++; -+} -+ -+static inline void xmit_rec_dec(void) -+{ -+ current->xmit_recursion--; -+} -+ -+#else -+ -+DECLARE_PER_CPU(int, xmit_recursion); - - static inline int dev_recursion_level(void) - { - return this_cpu_read(xmit_recursion); - } - -+static inline int xmit_rec_read(void) -+{ -+ return __this_cpu_read(xmit_recursion); -+} -+ -+static inline void xmit_rec_inc(void) -+{ -+ __this_cpu_inc(xmit_recursion); -+} -+ -+static inline void xmit_rec_dec(void) -+{ -+ __this_cpu_dec(xmit_recursion); -+} -+#endif -+ - struct net_device *dev_get_by_index(struct net *net, int ifindex); - struct net_device *__dev_get_by_index(struct net *net, int ifindex); - struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); -@@ -3805,10 +3848,48 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) - return (1U << debug_value) - 1; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu) -+{ -+ txq->xmit_lock_owner = current; -+} -+ -+static inline void netdev_queue_clear_owner(struct netdev_queue *txq) -+{ -+ txq->xmit_lock_owner = NULL; -+} -+ -+static inline bool netdev_queue_has_owner(struct netdev_queue *txq) -+{ -+ if (txq->xmit_lock_owner != NULL) -+ return true; -+ return false; -+} -+ -+#else -+ -+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu) -+{ -+ txq->xmit_lock_owner = cpu; -+} -+ -+static inline void netdev_queue_clear_owner(struct netdev_queue *txq) -+{ -+ txq->xmit_lock_owner = -1; -+} -+ -+static inline bool netdev_queue_has_owner(struct netdev_queue *txq) -+{ -+ if (txq->xmit_lock_owner != -1) -+ return true; -+ return false; -+} -+#endif -+ - static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) - { - spin_lock(&txq->_xmit_lock); -- txq->xmit_lock_owner = cpu; -+ netdev_queue_set_owner(txq, cpu); - } - - static inline bool __netif_tx_acquire(struct netdev_queue *txq) -@@ -3825,32 +3906,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq) - static inline void __netif_tx_lock_bh(struct netdev_queue *txq) - { - spin_lock_bh(&txq->_xmit_lock); -- txq->xmit_lock_owner = smp_processor_id(); -+ netdev_queue_set_owner(txq, smp_processor_id()); - } - - static inline bool __netif_tx_trylock(struct 
netdev_queue *txq) - { - bool ok = spin_trylock(&txq->_xmit_lock); - if (likely(ok)) -- txq->xmit_lock_owner = smp_processor_id(); -+ netdev_queue_set_owner(txq, smp_processor_id()); - return ok; - } - - static inline void __netif_tx_unlock(struct netdev_queue *txq) - { -- txq->xmit_lock_owner = -1; -+ netdev_queue_clear_owner(txq); - spin_unlock(&txq->_xmit_lock); - } - - static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) - { -- txq->xmit_lock_owner = -1; -+ netdev_queue_clear_owner(txq); - spin_unlock_bh(&txq->_xmit_lock); - } - - static inline void txq_trans_update(struct netdev_queue *txq) - { -- if (txq->xmit_lock_owner != -1) -+ if (netdev_queue_has_owner(txq)) - txq->trans_start = jiffies; - } - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index a38a2c2a8fe4..756fed8f5994 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1216,6 +1216,9 @@ struct task_struct { - #endif - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; -+#endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int xmit_recursion; - #endif - int pagefault_disabled; - #ifdef CONFIG_MMU -diff --git a/net/core/dev.c b/net/core/dev.c -index 985d1f703383..b5dc8a521fd2 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -3538,8 +3538,10 @@ static void skb_update_prio(struct sk_buff *skb) - #define skb_update_prio(skb) - #endif - -+#ifndef CONFIG_PREEMPT_RT_FULL - DEFINE_PER_CPU(int, xmit_recursion); - EXPORT_SYMBOL(xmit_recursion); -+#endif - - /** - * dev_loopback_xmit - loop back @skb -@@ -3830,9 +3832,12 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) - if (dev->flags & IFF_UP) { - int cpu = smp_processor_id(); /* ok because BHs are off */ - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (txq->xmit_lock_owner != current) { -+#else - if (txq->xmit_lock_owner != cpu) { -- if (unlikely(__this_cpu_read(xmit_recursion) > -- XMIT_RECURSION_LIMIT)) -+#endif -+ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) - goto recursion_alert; - - skb = validate_xmit_skb(skb, dev, &again); -@@ -3842,9 +3847,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) - HARD_TX_LOCK(dev, txq, cpu); - - if (!netif_xmit_stopped(txq)) { -- __this_cpu_inc(xmit_recursion); -+ xmit_rec_inc(); - skb = dev_hard_start_xmit(skb, dev, txq, &rc); -- __this_cpu_dec(xmit_recursion); -+ xmit_rec_dec(); - if (dev_xmit_complete(rc)) { - HARD_TX_UNLOCK(dev, txq); - goto out; -@@ -8563,7 +8568,7 @@ static void netdev_init_one_queue(struct net_device *dev, - /* Initialize queue lock */ - spin_lock_init(&queue->_xmit_lock); - netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); -- queue->xmit_lock_owner = -1; -+ netdev_queue_clear_owner(queue); - netdev_queue_numa_node_write(queue, NUMA_NO_NODE); - queue->dev = dev; - #ifdef CONFIG_BQL -diff --git a/net/core/filter.c b/net/core/filter.c -index 40b3af05c883..205cd1bb9bc2 100644 ---- a/net/core/filter.c -+++ b/net/core/filter.c -@@ -2000,7 +2000,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) - { - int ret; - -- if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) { -+ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) { - net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); - kfree_skb(skb); - return -ENETDOWN; -@@ -2009,9 +2009,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) - skb->dev = dev; - skb->tstamp = 0; - -- __this_cpu_inc(xmit_recursion); -+ xmit_rec_inc(); - ret = 
dev_queue_xmit(skb); -- __this_cpu_dec(xmit_recursion); -+ xmit_rec_dec(); - - return ret; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/kernel/patches-4.19.x-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch deleted file mode 100644 index 287880a49..000000000 --- a/kernel/patches-4.19.x-rt/0200-net-provide-a-way-to-delegate-processing-a-softirq-t.patch +++ /dev/null @@ -1,88 +0,0 @@ -From 621f9c07474a04eaee9cbb799a037e462a5b692c Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 20 Jan 2016 15:39:05 +0100 -Subject: [PATCH 200/328] net: provide a way to delegate processing a softirq - to ksoftirqd - -If the NET_RX uses up all of his budget it moves the following NAPI -invocations into the `ksoftirqd`. On -RT it does not do so. Instead it -rises the NET_RX softirq in its current context again. - -In order to get closer to mainline's behaviour this patch provides -__raise_softirq_irqoff_ksoft() which raises the softirq in the ksoftird. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/interrupt.h | 8 ++++++++ - kernel/softirq.c | 21 +++++++++++++++++++++ - net/core/dev.c | 2 +- - 3 files changed, 30 insertions(+), 1 deletion(-) - -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index 99f8b7ace7c9..72333899f043 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -524,6 +524,14 @@ extern void thread_do_softirq(void); - extern void open_softirq(int nr, void (*action)(struct softirq_action *)); - extern void softirq_init(void); - extern void __raise_softirq_irqoff(unsigned int nr); -+#ifdef CONFIG_PREEMPT_RT_FULL -+extern void __raise_softirq_irqoff_ksoft(unsigned int nr); -+#else -+static inline void __raise_softirq_irqoff_ksoft(unsigned int nr) -+{ -+ __raise_softirq_irqoff(nr); -+} -+#endif - - extern void raise_softirq_irqoff(unsigned int nr); - extern void raise_softirq(unsigned int nr); -diff --git a/kernel/softirq.c b/kernel/softirq.c -index 27a4bb2303d0..25bcf2f2714b 100644 ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -721,6 +721,27 @@ void __raise_softirq_irqoff(unsigned int nr) - wakeup_proper_softirq(nr); - } - -+/* -+ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd -+ */ -+void __raise_softirq_irqoff_ksoft(unsigned int nr) -+{ -+ unsigned int mask; -+ -+ if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) || -+ !__this_cpu_read(ktimer_softirqd))) -+ return; -+ mask = 1UL << nr; -+ -+ trace_softirq_raise(nr); -+ or_softirq_pending(mask); -+ if (mask & TIMER_SOFTIRQS) -+ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; -+ else -+ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; -+ wakeup_proper_softirq(nr); -+} -+ - /* - * This function must run with irqs disabled! 
- */ -diff --git a/net/core/dev.c b/net/core/dev.c -index b5dc8a521fd2..ecdf7534ef13 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -6368,7 +6368,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) - list_splice_tail(&repoll, &list); - list_splice(&list, &sd->poll_list); - if (!list_empty(&sd->poll_list)) -- __raise_softirq_irqoff(NET_RX_SOFTIRQ); -+ __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ); - - net_rps_action_and_irq_enable(sd); - out: --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/kernel/patches-4.19.x-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch deleted file mode 100644 index 8e912e6b6..000000000 --- a/kernel/patches-4.19.x-rt/0201-net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 136dfe4108059e637cdcc04f03dbca6637386429 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 30 Mar 2016 13:36:29 +0200 -Subject: [PATCH 201/328] net: dev: always take qdisc's busylock in - __dev_xmit_skb() - -The root-lock is dropped before dev_hard_start_xmit() is invoked and after -setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away -by a task with a higher priority then the task with the higher priority -won't be able to submit packets to the NIC directly instead they will be -enqueued into the Qdisc. The NIC will remain idle until the task(s) with -higher priority leave the CPU and the task with lower priority gets back -and finishes the job. - -If we take always the busylock we ensure that the RT task can boost the -low-prio task and submit the packet. - -Signed-off-by: Sebastian Andrzej Siewior ---- - net/core/dev.c | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/net/core/dev.c b/net/core/dev.c -index ecdf7534ef13..6c6c1c6d534b 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -3466,7 +3466,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, - * This permits qdisc->running owner to get the lock more - * often and dequeue packets faster. - */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ contended = true; -+#else - contended = qdisc_is_running(q); -+#endif - if (unlikely(contended)) - spin_lock(&q->busylock); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch b/kernel/patches-4.19.x-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch deleted file mode 100644 index 455f35f1d..000000000 --- a/kernel/patches-4.19.x-rt/0202-net-Qdisc-use-a-seqlock-instead-seqcount.patch +++ /dev/null @@ -1,292 +0,0 @@ -From 41d62c0f01ed3d5b94cee9c89dba5a3ba0947e42 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 14 Sep 2016 17:36:35 +0200 -Subject: [PATCH 202/328] net/Qdisc: use a seqlock instead seqcount - -The seqcount disables preemption on -RT while it is held which can't -remove. Also we don't want the reader to spin for ages if the writer is -scheduled out. The seqlock on the other hand will serialize / sleep on -the lock while writer is active. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/seqlock.h | 9 +++++++++ - include/net/gen_stats.h | 9 +++++---- - include/net/net_seq_lock.h | 15 +++++++++++++++ - include/net/sch_generic.h | 19 +++++++++++++++++-- - net/core/gen_estimator.c | 6 +++--- - net/core/gen_stats.c | 8 ++++---- - net/sched/sch_api.c | 2 +- - net/sched/sch_generic.c | 12 ++++++++++++ - 8 files changed, 66 insertions(+), 14 deletions(-) - create mode 100644 include/net/net_seq_lock.h - -diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h -index 689ed53016c7..58f9909d6659 100644 ---- a/include/linux/seqlock.h -+++ b/include/linux/seqlock.h -@@ -482,6 +482,15 @@ static inline void write_seqlock(seqlock_t *sl) - __raw_write_seqcount_begin(&sl->seqcount); - } - -+static inline int try_write_seqlock(seqlock_t *sl) -+{ -+ if (spin_trylock(&sl->lock)) { -+ __raw_write_seqcount_begin(&sl->seqcount); -+ return 1; -+ } -+ return 0; -+} -+ - static inline void write_sequnlock(seqlock_t *sl) - { - __raw_write_seqcount_end(&sl->seqcount); -diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h -index 883bb9085f15..3b593cdeb9af 100644 ---- a/include/net/gen_stats.h -+++ b/include/net/gen_stats.h -@@ -6,6 +6,7 @@ - #include - #include - #include -+#include - - struct gnet_stats_basic_cpu { - struct gnet_stats_basic_packed bstats; -@@ -36,11 +37,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, - spinlock_t *lock, struct gnet_dump *d, - int padattr); - --int gnet_stats_copy_basic(const seqcount_t *running, -+int gnet_stats_copy_basic(net_seqlock_t *running, - struct gnet_dump *d, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b); --void __gnet_stats_copy_basic(const seqcount_t *running, -+void __gnet_stats_copy_basic(net_seqlock_t *running, - struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b); -@@ -60,13 +61,13 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu_bstats, - struct net_rate_estimator __rcu **rate_est, - spinlock_t *lock, -- seqcount_t *running, struct nlattr *opt); -+ net_seqlock_t *running, struct nlattr *opt); - void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); - int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu_bstats, - struct net_rate_estimator __rcu **ptr, - spinlock_t *lock, -- seqcount_t *running, struct nlattr *opt); -+ net_seqlock_t *running, struct nlattr *opt); - bool gen_estimator_active(struct net_rate_estimator __rcu **ptr); - bool gen_estimator_read(struct net_rate_estimator __rcu **ptr, - struct gnet_stats_rate_est64 *sample); -diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h -new file mode 100644 -index 000000000000..a7034298a82a ---- /dev/null -+++ b/include/net/net_seq_lock.h -@@ -0,0 +1,15 @@ -+#ifndef __NET_NET_SEQ_LOCK_H__ -+#define __NET_NET_SEQ_LOCK_H__ -+ -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define net_seqlock_t seqlock_t -+# define net_seq_begin(__r) read_seqbegin(__r) -+# define net_seq_retry(__r, __s) read_seqretry(__r, __s) -+ -+#else -+# define net_seqlock_t seqcount_t -+# define net_seq_begin(__r) read_seqcount_begin(__r) -+# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s) -+#endif -+ -+#endif -diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h -index d737a6a2600b..2d35b952bf60 100644 ---- a/include/net/sch_generic.h -+++ b/include/net/sch_generic.h -@@ 
-10,6 +10,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -100,7 +101,7 @@ struct Qdisc { - struct sk_buff_head gso_skb ____cacheline_aligned_in_smp; - struct qdisc_skb_head q; - struct gnet_stats_basic_packed bstats; -- seqcount_t running; -+ net_seqlock_t running; - struct gnet_stats_queue qstats; - unsigned long state; - struct Qdisc *next_sched; -@@ -121,7 +122,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc) - { - if (qdisc->flags & TCQ_F_NOLOCK) - return spin_is_locked(&qdisc->seqlock); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ return spin_is_locked(&qdisc->running.lock) ? true : false; -+#else - return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; -+#endif - } - - static inline bool qdisc_run_begin(struct Qdisc *qdisc) -@@ -132,17 +137,27 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) - } else if (qdisc_is_running(qdisc)) { - return false; - } -+#ifdef CONFIG_PREEMPT_RT_BASE -+ if (try_write_seqlock(&qdisc->running)) -+ return true; -+ return false; -+#else - /* Variant of write_seqcount_begin() telling lockdep a trylock - * was attempted. - */ - raw_write_seqcount_begin(&qdisc->running); - seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_); - return true; -+#endif - } - - static inline void qdisc_run_end(struct Qdisc *qdisc) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ write_sequnlock(&qdisc->running); -+#else - write_seqcount_end(&qdisc->running); -+#endif - if (qdisc->flags & TCQ_F_NOLOCK) - spin_unlock(&qdisc->seqlock); - } -@@ -459,7 +474,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) - return qdisc_lock(root); - } - --static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) -+static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) - { - struct Qdisc *root = qdisc_root_sleeping(qdisc); - -diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c -index e4e442d70c2d..c8fa906733fb 100644 ---- a/net/core/gen_estimator.c -+++ b/net/core/gen_estimator.c -@@ -46,7 +46,7 @@ - struct net_rate_estimator { - struct gnet_stats_basic_packed *bstats; - spinlock_t *stats_lock; -- seqcount_t *running; -+ net_seqlock_t *running; - struct gnet_stats_basic_cpu __percpu *cpu_bstats; - u8 ewma_log; - u8 intvl_log; /* period : (250ms << intvl_log) */ -@@ -129,7 +129,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu_bstats, - struct net_rate_estimator __rcu **rate_est, - spinlock_t *lock, -- seqcount_t *running, -+ net_seqlock_t *running, - struct nlattr *opt) - { - struct gnet_estimator *parm = nla_data(opt); -@@ -227,7 +227,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu_bstats, - struct net_rate_estimator __rcu **rate_est, - spinlock_t *lock, -- seqcount_t *running, struct nlattr *opt) -+ net_seqlock_t *running, struct nlattr *opt) - { - return gen_new_estimator(bstats, cpu_bstats, rate_est, - lock, running, opt); -diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c -index e2fd8baec65f..8bab88738691 100644 ---- a/net/core/gen_stats.c -+++ b/net/core/gen_stats.c -@@ -142,7 +142,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats, - } - - void --__gnet_stats_copy_basic(const seqcount_t *running, -+__gnet_stats_copy_basic(net_seqlock_t *running, - struct gnet_stats_basic_packed *bstats, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b) -@@ -155,10 +155,10 
@@ __gnet_stats_copy_basic(const seqcount_t *running, - } - do { - if (running) -- seq = read_seqcount_begin(running); -+ seq = net_seq_begin(running); - bstats->bytes = b->bytes; - bstats->packets = b->packets; -- } while (running && read_seqcount_retry(running, seq)); -+ } while (running && net_seq_retry(running, seq)); - } - EXPORT_SYMBOL(__gnet_stats_copy_basic); - -@@ -176,7 +176,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic); - * if the room in the socket buffer was not sufficient. - */ - int --gnet_stats_copy_basic(const seqcount_t *running, -+gnet_stats_copy_basic(net_seqlock_t *running, - struct gnet_dump *d, - struct gnet_stats_basic_cpu __percpu *cpu, - struct gnet_stats_basic_packed *b) -diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c -index 39e319d04bb8..fe99928aff78 100644 ---- a/net/sched/sch_api.c -+++ b/net/sched/sch_api.c -@@ -1166,7 +1166,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, - rcu_assign_pointer(sch->stab, stab); - } - if (tca[TCA_RATE]) { -- seqcount_t *running; -+ net_seqlock_t *running; - - err = -EOPNOTSUPP; - if (sch->flags & TCQ_F_MQROOT) { -diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c -index 4ab20f1138fd..a9ed58ca3924 100644 ---- a/net/sched/sch_generic.c -+++ b/net/sched/sch_generic.c -@@ -575,7 +575,11 @@ struct Qdisc noop_qdisc = { - .ops = &noop_qdisc_ops, - .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), - .dev_queue = &noop_netdev_queue, -+#ifdef CONFIG_PREEMPT_RT_BASE -+ .running = __SEQLOCK_UNLOCKED(noop_qdisc.running), -+#else - .running = SEQCNT_ZERO(noop_qdisc.running), -+#endif - .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), - .gso_skb = { - .next = (struct sk_buff *)&noop_qdisc.gso_skb, -@@ -876,9 +880,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, - lockdep_set_class(&sch->busylock, - dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); - -+#ifdef CONFIG_PREEMPT_RT_BASE -+ seqlock_init(&sch->running); -+ lockdep_set_class(&sch->running.seqcount, -+ dev->qdisc_running_key ?: &qdisc_running_key); -+ lockdep_set_class(&sch->running.lock, -+ dev->qdisc_running_key ?: &qdisc_running_key); -+#else - seqcount_init(&sch->running); - lockdep_set_class(&sch->running, - dev->qdisc_running_key ?: &qdisc_running_key); -+#endif - - sch->ops = ops; - sch->flags = ops->static_flags; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch b/kernel/patches-4.19.x-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch deleted file mode 100644 index 42748d105..000000000 --- a/kernel/patches-4.19.x-rt/0203-net-add-back-the-missing-serialization-in-ip_send_un.patch +++ /dev/null @@ -1,98 +0,0 @@ -From 7ddcdec24f0b8d364eed41710f1a2a821979e5a1 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 31 Aug 2016 17:21:56 +0200 -Subject: [PATCH 203/328] net: add back the missing serialization in - ip_send_unicast_reply() -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Some time ago Sami Pietikäinen reported a crash on -RT in -ip_send_unicast_reply() which was later fixed by Nicholas Mc Guire -(v3.12.8-rt11). Later (v3.18.8) the code was reworked and I dropped the -patch. As it turns out it was mistake. -I have reports that the same crash is possible with a similar backtrace. -It seems that vanilla protects access to this_cpu_ptr() via -local_bh_disable(). This does not work the on -RT since we can have -NET_RX and NET_TX running in parallel on the same CPU. 
-This is brings back the old locks. - -|Unable to handle kernel NULL pointer dereference at virtual address 00000010 -|PC is at __ip_make_skb+0x198/0x3e8 -|[] (__ip_make_skb) from [] (ip_push_pending_frames+0x20/0x40) -|[] (ip_push_pending_frames) from [] (ip_send_unicast_reply+0x210/0x22c) -|[] (ip_send_unicast_reply) from [] (tcp_v4_send_reset+0x190/0x1c0) -|[] (tcp_v4_send_reset) from [] (tcp_v4_do_rcv+0x22c/0x288) -|[] (tcp_v4_do_rcv) from [] (release_sock+0xb4/0x150) -|[] (release_sock) from [] (tcp_close+0x240/0x454) -|[] (tcp_close) from [] (inet_release+0x74/0x7c) -|[] (inet_release) from [] (sock_release+0x30/0xb0) -|[] (sock_release) from [] (sock_close+0x1c/0x24) -|[] (sock_close) from [] (__fput+0xe8/0x20c) -|[] (__fput) from [] (____fput+0x18/0x1c) -|[] (____fput) from [] (task_work_run+0xa4/0xb8) -|[] (task_work_run) from [] (do_work_pending+0xd0/0xe4) -|[] (do_work_pending) from [] (work_pending+0xc/0x20) -|Code: e3530001 8a000001 e3a00040 ea000011 (e5973010) - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - net/ipv4/tcp_ipv4.c | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c -index 6da393016c11..105e94ff1095 100644 ---- a/net/ipv4/tcp_ipv4.c -+++ b/net/ipv4/tcp_ipv4.c -@@ -62,6 +62,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -634,6 +635,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) - } - EXPORT_SYMBOL(tcp_v4_send_check); - -+static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock); - /* - * This routine will send an RST to the other tcp. - * -@@ -768,6 +770,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) - arg.tos = ip_hdr(skb)->tos; - arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); - local_bh_disable(); -+ local_lock(tcp_sk_lock); - ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk); - if (sk) - ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? -@@ -780,6 +783,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) - ctl_sk->sk_mark = 0; - __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); - __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); -+ local_unlock(tcp_sk_lock); - local_bh_enable(); - - #ifdef CONFIG_TCP_MD5SIG -@@ -860,6 +864,7 @@ static void tcp_v4_send_ack(const struct sock *sk, - arg.tos = tos; - arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL); - local_bh_disable(); -+ local_lock(tcp_sk_lock); - ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk); - if (sk) - ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? -@@ -871,6 +876,7 @@ static void tcp_v4_send_ack(const struct sock *sk, - - ctl_sk->sk_mark = 0; - __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); -+ local_unlock(tcp_sk_lock); - local_bh_enable(); - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0204-net-add-a-lock-around-icmp_sk.patch b/kernel/patches-4.19.x-rt/0204-net-add-a-lock-around-icmp_sk.patch deleted file mode 100644 index 7d85d5f00..000000000 --- a/kernel/patches-4.19.x-rt/0204-net-add-a-lock-around-icmp_sk.patch +++ /dev/null @@ -1,64 +0,0 @@ -From abcb99faf244ae2e089bd1933e80f0d8216e00b8 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 31 Aug 2016 17:54:09 +0200 -Subject: [PATCH 204/328] net: add a lock around icmp_sk() - -It looks like the this_cpu_ptr() access in icmp_sk() is protected with -local_bh_disable(). To avoid missing serialization in -RT I am adding -here a local lock. No crash has been observed, this is just precaution. 
- -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - net/ipv4/icmp.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - -diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c -index 4efa5e33513e..de67d595e298 100644 ---- a/net/ipv4/icmp.c -+++ b/net/ipv4/icmp.c -@@ -77,6 +77,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -204,6 +205,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1]; - * - * On SMP we have one ICMP socket per-cpu. - */ -+static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock); -+ - static struct sock *icmp_sk(struct net *net) - { - return *this_cpu_ptr(net->ipv4.icmp_sk); -@@ -214,12 +217,16 @@ static inline struct sock *icmp_xmit_lock(struct net *net) - { - struct sock *sk; - -+ if (!local_trylock(icmp_sk_lock)) -+ return NULL; -+ - sk = icmp_sk(net); - - if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { - /* This can happen if the output path signals a - * dst_link_failure() for an outgoing ICMP packet. - */ -+ local_unlock(icmp_sk_lock); - return NULL; - } - return sk; -@@ -228,6 +235,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net) - static inline void icmp_xmit_unlock(struct sock *sk) - { - spin_unlock(&sk->sk_lock.slock); -+ local_unlock(icmp_sk_lock); - } - - int sysctl_icmp_msgs_per_sec __read_mostly = 1000; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/kernel/patches-4.19.x-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch deleted file mode 100644 index 075981241..000000000 --- a/kernel/patches-4.19.x-rt/0205-net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 6da7a2e74d33df8e51178520e3945133a97ee419 Mon Sep 17 00:00:00 2001 -From: Steven Rostedt -Date: Tue, 6 Dec 2016 17:50:30 -0500 -Subject: [PATCH 205/328] net: Have __napi_schedule_irqoff() disable interrupts - on RT - -A customer hit a crash where the napi sd->poll_list became corrupted. -The customer had the bnx2x driver, which does a -__napi_schedule_irqoff() in its interrupt handler. Unfortunately, when -running with CONFIG_PREEMPT_RT_FULL, this interrupt handler is run as a -thread and is preemptable. The call to ____napi_schedule() must be done -with interrupts disabled to protect the per cpu softnet_data's -"poll_list, which is protected by disabling interrupts (disabling -preemption is enough when all interrupts are threaded and -local_bh_disable() can't preempt)." - -As bnx2x isn't the only driver that does this, the safest thing to do -is to make __napi_schedule_irqoff() call __napi_schedule() instead when -CONFIG_PREEMPT_RT_FULL is enabled, which will call local_irq_save() -before calling ____napi_schedule(). - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Steven Rostedt (Red Hat) -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/netdevice.h | 12 ++++++++++++ - net/core/dev.c | 2 ++ - 2 files changed, 14 insertions(+) - -diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h -index 5de4b66e11fe..1d6bb0ab437f 100644 ---- a/include/linux/netdevice.h -+++ b/include/linux/netdevice.h -@@ -422,7 +422,19 @@ typedef enum rx_handler_result rx_handler_result_t; - typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); - - void __napi_schedule(struct napi_struct *n); -+ -+/* -+ * When PREEMPT_RT_FULL is defined, all device interrupt handlers -+ * run as threads, and they can also be preempted (without PREEMPT_RT -+ * interrupt threads can not be preempted). 
Which means that calling -+ * __napi_schedule_irqoff() from an interrupt handler can be preempted -+ * and can corrupt the napi->poll_list. -+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+#define __napi_schedule_irqoff(n) __napi_schedule(n) -+#else - void __napi_schedule_irqoff(struct napi_struct *n); -+#endif - - static inline bool napi_disable_pending(struct napi_struct *n) - { -diff --git a/net/core/dev.c b/net/core/dev.c -index 6c6c1c6d534b..19e2cd0897b3 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -5938,6 +5938,7 @@ bool napi_schedule_prep(struct napi_struct *n) - } - EXPORT_SYMBOL(napi_schedule_prep); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * __napi_schedule_irqoff - schedule for receive - * @n: entry to schedule -@@ -5949,6 +5950,7 @@ void __napi_schedule_irqoff(struct napi_struct *n) - ____napi_schedule(this_cpu_ptr(&softnet_data), n); - } - EXPORT_SYMBOL(__napi_schedule_irqoff); -+#endif - - bool napi_complete_done(struct napi_struct *n, int work_done) - { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0206-irqwork-push-most-work-into-softirq-context.patch b/kernel/patches-4.19.x-rt/0206-irqwork-push-most-work-into-softirq-context.patch deleted file mode 100644 index ce2534331..000000000 --- a/kernel/patches-4.19.x-rt/0206-irqwork-push-most-work-into-softirq-context.patch +++ /dev/null @@ -1,263 +0,0 @@ -From 6151f6a5fa3c014525a6c18524e092f1e47fb1e1 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 23 Jun 2015 15:32:51 +0200 -Subject: [PATCH 206/328] irqwork: push most work into softirq context - -Initially we defered all irqwork into softirq because we didn't want the -latency spikes if perf or another user was busy and delayed the RT task. -The NOHZ trigger (nohz_full_kick_work) was the first user that did not work -as expected if it did not run in the original irqwork context so we had to -bring it back somehow for it. push_irq_work_func is the second one that -requires this. - -This patch adds the IRQ_WORK_HARD_IRQ which makes sure the callback runs -in raw-irq context. Everything else is defered into softirq context. Without --RT we have the orignal behavior. 
- -This patch incorporates tglx orignal work which revoked a little bringing back -the arch_irq_work_raise() if possible and a few fixes from Steven Rostedt and -Mike Galbraith, - -[bigeasy: melt tglx's irq_work_tick_soft() which splits irq_work_tick() into a - hard and soft variant] -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/irq_work.h | 8 +++++ - kernel/irq_work.c | 75 ++++++++++++++++++++++++++++++---------- - kernel/rcu/tree.c | 1 + - kernel/sched/topology.c | 1 + - kernel/time/tick-sched.c | 1 + - kernel/time/timer.c | 2 ++ - 6 files changed, 70 insertions(+), 18 deletions(-) - -diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h -index b11fcdfd0770..0c50559987c5 100644 ---- a/include/linux/irq_work.h -+++ b/include/linux/irq_work.h -@@ -18,6 +18,8 @@ - - /* Doesn't want IPI, wait for tick: */ - #define IRQ_WORK_LAZY BIT(2) -+/* Run hard IRQ context, even on RT */ -+#define IRQ_WORK_HARD_IRQ BIT(3) - - #define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) - -@@ -52,4 +54,10 @@ static inline bool irq_work_needs_cpu(void) { return false; } - static inline void irq_work_run(void) { } - #endif - -+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) -+void irq_work_tick_soft(void); -+#else -+static inline void irq_work_tick_soft(void) { } -+#endif -+ - #endif /* _LINUX_IRQ_WORK_H */ -diff --git a/kernel/irq_work.c b/kernel/irq_work.c -index 73288914ed5e..2940622da5b3 100644 ---- a/kernel/irq_work.c -+++ b/kernel/irq_work.c -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - #include - - -@@ -57,29 +58,35 @@ void __weak arch_irq_work_raise(void) - } - - /* Enqueue on current CPU, work must already be claimed and preempt disabled */ --static void __irq_work_queue_local(struct irq_work *work) -+static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list) - { -- /* If the work is "lazy", handle it from next tick if any */ -- if (work->flags & IRQ_WORK_LAZY) { -- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && -- tick_nohz_tick_stopped()) -- arch_irq_work_raise(); -- } else { -- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) -- arch_irq_work_raise(); -- } -+ bool empty; -+ -+ empty = llist_add(&work->llnode, list); -+ -+ if (empty && -+ (!(work->flags & IRQ_WORK_LAZY) || -+ tick_nohz_tick_stopped())) -+ arch_irq_work_raise(); - } - - /* Enqueue the irq work @work on the current CPU */ - bool irq_work_queue(struct irq_work *work) - { -+ struct llist_head *list; -+ - /* Only queue if not already pending */ - if (!irq_work_claim(work)) - return false; - - /* Queue the entry and raise the IPI if needed. 
*/ - preempt_disable(); -- __irq_work_queue_local(work); -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) -+ list = this_cpu_ptr(&lazy_list); -+ else -+ list = this_cpu_ptr(&raised_list); -+ -+ __irq_work_queue_local(work, list); - preempt_enable(); - - return true; -@@ -98,6 +105,9 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) - return irq_work_queue(work); - - #else /* CONFIG_SMP: */ -+ struct llist_head *list; -+ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); -+ - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(cpu)); - -@@ -106,13 +116,21 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) - return false; - - preempt_disable(); -+ -+ lazy_work = work->flags & IRQ_WORK_LAZY; -+ -+ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ))) -+ list = &per_cpu(lazy_list, cpu); -+ else -+ list = &per_cpu(raised_list, cpu); -+ - if (cpu != smp_processor_id()) { - /* Arch remote IPI send/receive backend aren't NMI safe */ - WARN_ON_ONCE(in_nmi()); -- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) -+ if (llist_add(&work->llnode, list)) - arch_send_call_function_single_ipi(cpu); - } else { -- __irq_work_queue_local(work); -+ __irq_work_queue_local(work, list); - } - preempt_enable(); - -@@ -128,9 +146,8 @@ bool irq_work_needs_cpu(void) - raised = this_cpu_ptr(&raised_list); - lazy = this_cpu_ptr(&lazy_list); - -- if (llist_empty(raised) || arch_irq_work_has_interrupt()) -- if (llist_empty(lazy)) -- return false; -+ if (llist_empty(raised) && llist_empty(lazy)) -+ return false; - - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); -@@ -144,8 +161,12 @@ static void irq_work_run_list(struct llist_head *list) - struct llist_node *llnode; - unsigned long flags; - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ /* -+ * nort: On RT IRQ-work may run in SOFTIRQ context. -+ */ - BUG_ON(!irqs_disabled()); -- -+#endif - if (llist_empty(list)) - return; - -@@ -177,7 +198,16 @@ static void irq_work_run_list(struct llist_head *list) - void irq_work_run(void) - { - irq_work_run_list(this_cpu_ptr(&raised_list)); -- irq_work_run_list(this_cpu_ptr(&lazy_list)); -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) { -+ /* -+ * NOTE: we raise softirq via IPI for safety, -+ * and execute in irq_work_tick() to move the -+ * overhead from hard to soft irq context. 
-+ */ -+ if (!llist_empty(this_cpu_ptr(&lazy_list))) -+ raise_softirq(TIMER_SOFTIRQ); -+ } else -+ irq_work_run_list(this_cpu_ptr(&lazy_list)); - } - EXPORT_SYMBOL_GPL(irq_work_run); - -@@ -187,8 +217,17 @@ void irq_work_tick(void) - - if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) - irq_work_run_list(raised); -+ -+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) -+ irq_work_run_list(this_cpu_ptr(&lazy_list)); -+} -+ -+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) -+void irq_work_tick_soft(void) -+{ - irq_work_run_list(this_cpu_ptr(&lazy_list)); - } -+#endif - - /* - * Synchronize against the irq_work @entry, ensures the entry is not -diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c -index f162a4f54b05..278fe66bfb70 100644 ---- a/kernel/rcu/tree.c -+++ b/kernel/rcu/tree.c -@@ -1296,6 +1296,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) - !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && - (rnp->ffmask & rdp->grpmask)) { - init_irq_work(&rdp->rcu_iw, rcu_iw_handler); -+ rdp->rcu_iw.flags = IRQ_WORK_HARD_IRQ; - rdp->rcu_iw_pending = true; - rdp->rcu_iw_gp_seq = rnp->gp_seq; - irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); -diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c -index 74b694392f2f..fb4d11bab6b7 100644 ---- a/kernel/sched/topology.c -+++ b/kernel/sched/topology.c -@@ -279,6 +279,7 @@ static int init_rootdomain(struct root_domain *rd) - rd->rto_cpu = -1; - raw_spin_lock_init(&rd->rto_lock); - init_irq_work(&rd->rto_push_work, rto_push_irq_work_func); -+ rd->rto_push_work.flags |= IRQ_WORK_HARD_IRQ; - #endif - - init_dl_bw(&rd->dl_bw); -diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c -index 2b0ddd50e879..4d31ec98e968 100644 ---- a/kernel/time/tick-sched.c -+++ b/kernel/time/tick-sched.c -@@ -236,6 +236,7 @@ static void nohz_full_kick_func(struct irq_work *work) - - static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { - .func = nohz_full_kick_func, -+ .flags = IRQ_WORK_HARD_IRQ, - }; - - /* -diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index 3fab1c50bf1b..2fcd56aa6092 100644 ---- a/kernel/time/timer.c -+++ b/kernel/time/timer.c -@@ -1735,6 +1735,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) - { - struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); - -+ irq_work_tick_soft(); -+ - __run_timers(base); - if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) - __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0207-printk-Make-rt-aware.patch b/kernel/patches-4.19.x-rt/0207-printk-Make-rt-aware.patch deleted file mode 100644 index 559dd014c..000000000 --- a/kernel/patches-4.19.x-rt/0207-printk-Make-rt-aware.patch +++ /dev/null @@ -1,113 +0,0 @@ -From e06507e6c3956ece840a3de1c8124c035609a1fb Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 19 Sep 2012 14:50:37 +0200 -Subject: [PATCH 207/328] printk: Make rt aware - -Drop the lock before calling the console driver and do not disable -interrupts while printing to a serial console. 
- -Signed-off-by: Thomas Gleixner ---- - kernel/printk/printk.c | 33 ++++++++++++++++++++++++++++++--- - 1 file changed, 30 insertions(+), 3 deletions(-) - -diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index f934baed564d..c927f89961a8 100644 ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -1624,6 +1624,7 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) - return do_syslog(type, buf, len, SYSLOG_FROM_READER); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Special console_lock variants that help to reduce the risk of soft-lockups. - * They allow to pass console_lock to another printk() call using a busy wait. -@@ -1764,6 +1765,15 @@ static int console_trylock_spinning(void) - return 1; - } - -+#else -+ -+static int console_trylock_spinning(void) -+{ -+ return console_trylock(); -+} -+ -+#endif -+ - /* - * Call the console drivers, asking them to write out - * log_buf[start] to log_buf[end - 1]. -@@ -1779,6 +1789,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, - if (!console_drivers) - return; - -+ migrate_disable(); - for_each_console(con) { - if (exclusive_console && con != exclusive_console) - continue; -@@ -1794,6 +1805,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, - else - con->write(con, text, len); - } -+ migrate_enable(); - } - - int printk_delay_msec __read_mostly; -@@ -1988,20 +2000,30 @@ asmlinkage int vprintk_emit(int facility, int level, - - /* If called from the scheduler, we can not call up(). */ - if (!in_sched && pending_output) { -+ int may_trylock = 1; -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * we can't take a sleeping lock with IRQs or preeption disabled -+ * so we can't print in these contexts -+ */ -+ if (!(preempt_count() == 0 && !irqs_disabled())) -+ may_trylock = 0; -+#endif - /* - * Disable preemption to avoid being preempted while holding - * console_sem which would prevent anyone from printing to - * console - */ -- preempt_disable(); -+ migrate_disable(); - /* - * Try to acquire and then immediately release the console - * semaphore. The release will print out buffers and wake up - * /dev/kmsg and syslog() users. 
- */ -- if (console_trylock_spinning()) -+ if (may_trylock && console_trylock_spinning()) - console_unlock(); -- preempt_enable(); -+ migrate_enable(); - } - - if (pending_output) -@@ -2463,6 +2485,10 @@ void console_unlock(void) - console_seq++; - raw_spin_unlock(&logbuf_lock); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ printk_safe_exit_irqrestore(flags); -+ call_console_drivers(ext_text, ext_len, text, len); -+#else - /* - * While actively printing out messages, if another printk() - * were to occur on another CPU, it may wait for this one to -@@ -2481,6 +2507,7 @@ void console_unlock(void) - } - - printk_safe_exit_irqrestore(flags); -+#endif - - if (do_cond_resched) - cond_resched(); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/kernel/patches-4.19.x-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch deleted file mode 100644 index 860fe1678..000000000 --- a/kernel/patches-4.19.x-rt/0208-kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 0347ee41cebcbf76b3456e71c25eebd52611cdb9 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 19 May 2016 17:45:27 +0200 -Subject: [PATCH 208/328] kernel/printk: Don't try to print from IRQ/NMI region - -On -RT we try to acquire sleeping locks which might lead to warnings -from lockdep or a warn_on() from spin_try_lock() (which is a rtmutex on -RT). -We don't print in general from a IRQ off region so we should not try -this via console_unblank() / bust_spinlocks() as well. - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 10 ++++++++++ - 1 file changed, 10 insertions(+) - -diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index c927f89961a8..49ea374ba8ea 100644 ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -1789,6 +1789,11 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, - if (!console_drivers) - return; - -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { -+ if (in_irq() || in_nmi()) -+ return; -+ } -+ - migrate_disable(); - for_each_console(con) { - if (exclusive_console && con != exclusive_console) -@@ -2555,6 +2560,11 @@ void console_unblank(void) - { - struct console *c; - -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { -+ if (in_irq() || in_nmi()) -+ return; -+ } -+ - /* - * console_unblank can no longer be called in interrupt context unless - * oops_in_progress is set to 1.. --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch b/kernel/patches-4.19.x-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch deleted file mode 100644 index a7a3d978a..000000000 --- a/kernel/patches-4.19.x-rt/0209-printk-Drop-the-logbuf_lock-more-often.patch +++ /dev/null @@ -1,82 +0,0 @@ -From defb111e7019c8c65c85ce7a673ffe3d4b4a1338 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 21 Mar 2013 19:01:05 +0100 -Subject: [PATCH 209/328] printk: Drop the logbuf_lock more often - -The lock is hold with irgs off. The latency drops 500us+ on my arm bugs -with a "full" buffer after executing "dmesg" on the shell. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 28 ++++++++++++++++++++++++++++ - 1 file changed, 28 insertions(+) - -diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c -index 49ea374ba8ea..acef3657a316 100644 ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -1427,12 +1427,23 @@ static int syslog_print_all(char __user *buf, int size, bool clear) - u64 next_seq; - u64 seq; - u32 idx; -+ int attempts = 0; -+ int num_msg; - - text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); - if (!text) - return -ENOMEM; - - logbuf_lock_irq(); -+ -+try_again: -+ attempts++; -+ if (attempts > 10) { -+ len = -EBUSY; -+ goto out; -+ } -+ num_msg = 0; -+ - /* - * Find first record that fits, including all following records, - * into the user-provided buffer for this dump. -@@ -1445,6 +1456,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) - len += msg_print_text(msg, true, NULL, 0); - idx = log_next(idx); - seq++; -+ num_msg++; -+ if (num_msg > 5) { -+ num_msg = 0; -+ logbuf_unlock_irq(); -+ logbuf_lock_irq(); -+ if (clear_seq < log_first_seq) -+ goto try_again; -+ } - } - - /* move first record forward until length fits into the buffer */ -@@ -1456,6 +1475,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) - len -= msg_print_text(msg, true, NULL, 0); - idx = log_next(idx); - seq++; -+ num_msg++; -+ if (num_msg > 5) { -+ num_msg = 0; -+ logbuf_unlock_irq(); -+ logbuf_lock_irq(); -+ if (clear_seq < log_first_seq) -+ goto try_again; -+ } - } - - /* last message fitting into this dump */ -@@ -1493,6 +1520,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) - clear_seq = log_next_seq; - clear_idx = log_next_idx; - } -+out: - logbuf_unlock_irq(); - - kfree(text); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch b/kernel/patches-4.19.x-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch deleted file mode 100644 index 061ba569c..000000000 --- a/kernel/patches-4.19.x-rt/0210-ARM-enable-irq-in-translation-section-permission-fau.patch +++ /dev/null @@ -1,95 +0,0 @@ -From b991f2d263f639e8c4aa1a70ff0d979e6245a5b2 Mon Sep 17 00:00:00 2001 -From: "Yadi.hu" -Date: Wed, 10 Dec 2014 10:32:09 +0800 -Subject: [PATCH 210/328] ARM: enable irq in translation/section permission - fault handlers -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Probably happens on all ARM, with -CONFIG_PREEMPT_RT_FULL -CONFIG_DEBUG_ATOMIC_SLEEP - -This simple program.... - -int main() { - *((char*)0xc0001000) = 0; -}; - -[ 512.742724] BUG: sleeping function called from invalid context at kernel/rtmutex.c:658 -[ 512.743000] in_atomic(): 0, irqs_disabled(): 128, pid: 994, name: a -[ 512.743217] INFO: lockdep is turned off. 
-[ 512.743360] irq event stamp: 0 -[ 512.743482] hardirqs last enabled at (0): [< (null)>] (null) -[ 512.743714] hardirqs last disabled at (0): [] copy_process+0x3b0/0x11c0 -[ 512.744013] softirqs last enabled at (0): [] copy_process+0x3b0/0x11c0 -[ 512.744303] softirqs last disabled at (0): [< (null)>] (null) -[ 512.744631] [] (unwind_backtrace+0x0/0x104) -[ 512.745001] [] (dump_stack+0x20/0x24) -[ 512.745355] [] (__might_sleep+0x1dc/0x1e0) -[ 512.745717] [] (rt_spin_lock+0x34/0x6c) -[ 512.746073] [] (do_force_sig_info+0x34/0xf0) -[ 512.746457] [] (force_sig_info+0x18/0x1c) -[ 512.746829] [] (__do_user_fault+0x9c/0xd8) -[ 512.747185] [] (do_bad_area+0x7c/0x94) -[ 512.747536] [] (do_sect_fault+0x40/0x48) -[ 512.747898] [] (do_DataAbort+0x40/0xa0) -[ 512.748181] Exception stack(0xecaa1fb0 to 0xecaa1ff8) - -Oxc0000000 belongs to kernel address space, user task can not be -allowed to access it. For above condition, correct result is that -test case should receive a “segment fault” and exits but not stacks. - -the root cause is commit 02fe2845d6a8 ("avoid enabling interrupts in -prefetch/data abort handlers"),it deletes irq enable block in Data -abort assemble code and move them into page/breakpiont/alignment fault -handlers instead. But author does not enable irq in translation/section -permission fault handlers. ARM disables irq when it enters exception/ -interrupt mode, if kernel doesn't enable irq, it would be still disabled -during translation/section permission fault. - -We see the above splat because do_force_sig_info is still called with -IRQs off, and that code eventually does a: - - spin_lock_irqsave(&t->sighand->siglock, flags); - -As this is architecture independent code, and we've not seen any other -need for other arch to have the siglock converted to raw lock, we can -conclude that we should enable irq for ARM translation/section -permission exception. - - -Signed-off-by: Yadi.hu -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/mm/fault.c | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c -index a9ee0d9dc740..20b0e146de98 100644 ---- a/arch/arm/mm/fault.c -+++ b/arch/arm/mm/fault.c -@@ -439,6 +439,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, - if (addr < TASK_SIZE) - return do_page_fault(addr, fsr, regs); - -+ if (interrupts_enabled(regs)) -+ local_irq_enable(); -+ - if (user_mode(regs)) - goto bad_area; - -@@ -506,6 +509,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, - static int - do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) - { -+ if (interrupts_enabled(regs)) -+ local_irq_enable(); -+ - do_bad_area(addr, fsr, regs); - return 0; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch b/kernel/patches-4.19.x-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch deleted file mode 100644 index 58654cf15..000000000 --- a/kernel/patches-4.19.x-rt/0211-genirq-update-irq_set_irqchip_state-documentation.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 4324fc16ee37a4f6f8e0e6fc46cdaa6e16a5747a Mon Sep 17 00:00:00 2001 -From: Josh Cartwright -Date: Thu, 11 Feb 2016 11:54:00 -0600 -Subject: [PATCH 211/328] genirq: update irq_set_irqchip_state documentation - -On -rt kernels, the use of migrate_disable()/migrate_enable() is -sufficient to guarantee a task isn't moved to another CPU. Update the -irq_set_irqchip_state() documentation to reflect this. 
- -Signed-off-by: Josh Cartwright -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/irq/manage.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 3858ac895777..5701774a6d71 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -2330,7 +2330,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state); - * This call sets the internal irqchip state of an interrupt, - * depending on the value of @which. - * -- * This function should be called with preemption disabled if the -+ * This function should be called with migration disabled if the - * interrupt controller has per-cpu registers. - */ - int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/kernel/patches-4.19.x-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch deleted file mode 100644 index e76e48639..000000000 --- a/kernel/patches-4.19.x-rt/0212-KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 25e170a4bdc1953a46e60f3551c0ffd1f2b7b3aa Mon Sep 17 00:00:00 2001 -From: Josh Cartwright -Date: Thu, 11 Feb 2016 11:54:01 -0600 -Subject: [PATCH 212/328] KVM: arm/arm64: downgrade preempt_disable()d region - to migrate_disable() - -kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating -the vgic and timer states to prevent the calling task from migrating to -another CPU. It does so to prevent the task from writing to the -incorrect per-CPU GIC distributor registers. - -On -rt kernels, it's possible to maintain the same guarantee with the -use of migrate_{disable,enable}(), with the added benefit that the -migrate-disabled region is preemptible. Update -kvm_arch_vcpu_ioctl_run() to do so. - -Cc: Christoffer Dall -Reported-by: Manish Jaggi -Signed-off-by: Josh Cartwright -Signed-off-by: Sebastian Andrzej Siewior ---- - virt/kvm/arm/arm.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c -index d982650deb33..efe2d6c0201c 100644 ---- a/virt/kvm/arm/arm.c -+++ b/virt/kvm/arm/arm.c -@@ -723,7 +723,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) - * involves poking the GIC, which must be done in a - * non-preemptible context. 
- */ -- preempt_disable(); -+ migrate_disable(); - - kvm_pmu_flush_hwstate(vcpu); - -@@ -772,7 +772,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) - kvm_timer_sync_hwstate(vcpu); - kvm_vgic_sync_hwstate(vcpu); - local_irq_enable(); -- preempt_enable(); -+ migrate_enable(); - continue; - } - -@@ -850,7 +850,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) - /* Exit types that need handling before we can be preempted */ - handle_exit_early(vcpu, run, ret); - -- preempt_enable(); -+ migrate_enable(); - - ret = handle_exit(vcpu, run, ret); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch b/kernel/patches-4.19.x-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch deleted file mode 100644 index 1c10ffe5b..000000000 --- a/kernel/patches-4.19.x-rt/0213-arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch +++ /dev/null @@ -1,170 +0,0 @@ -From 033bb93a0515b9df4593fbfcf93ea5ede830532c Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 25 Jul 2018 14:02:38 +0200 -Subject: [PATCH 213/328] arm64: fpsimd: use preemp_disable in addition to - local_bh_disable() - -In v4.16-RT I noticed a number of warnings from task_fpsimd_load(). The -code disables BH and expects that it is not preemptible. On -RT the -task remains preemptible but remains the same CPU. This may corrupt the -content of the SIMD registers if the task is preempted during -saving/restoring those registers. - -Add preempt_disable()/enable() to enfore the required semantic on -RT. - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm64/kernel/fpsimd.c | 31 +++++++++++++++++++++++++++++-- - 1 file changed, 29 insertions(+), 2 deletions(-) - -diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c -index 14fdbaa6ee3a..7d572175682a 100644 ---- a/arch/arm64/kernel/fpsimd.c -+++ b/arch/arm64/kernel/fpsimd.c -@@ -159,6 +159,16 @@ static void sve_free(struct task_struct *task) - __sve_free(task); - } - -+static void *sve_free_atomic(struct task_struct *task) -+{ -+ void *sve_state = task->thread.sve_state; -+ -+ WARN_ON(test_tsk_thread_flag(task, TIF_SVE)); -+ -+ task->thread.sve_state = NULL; -+ return sve_state; -+} -+ - /* - * TIF_SVE controls whether a task can use SVE without trapping while - * in userspace, and also the way a task's FPSIMD/SVE state is stored -@@ -549,6 +559,7 @@ int sve_set_vector_length(struct task_struct *task, - * non-SVE thread. 
- */ - if (task == current) { -+ preempt_disable(); - local_bh_disable(); - - fpsimd_save(); -@@ -559,8 +570,10 @@ int sve_set_vector_length(struct task_struct *task, - if (test_and_clear_tsk_thread_flag(task, TIF_SVE)) - sve_to_fpsimd(task); - -- if (task == current) -+ if (task == current) { - local_bh_enable(); -+ preempt_enable(); -+ } - - /* - * Force reallocation of task SVE state to the correct size -@@ -815,6 +828,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) - - sve_alloc(current); - -+ preempt_disable(); - local_bh_disable(); - - fpsimd_save(); -@@ -828,6 +842,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) - WARN_ON(1); /* SVE access shouldn't have trapped */ - - local_bh_enable(); -+ preempt_enable(); - } - - /* -@@ -894,10 +909,12 @@ void fpsimd_thread_switch(struct task_struct *next) - void fpsimd_flush_thread(void) - { - int vl, supported_vl; -+ void *mem = NULL; - - if (!system_supports_fpsimd()) - return; - -+ preempt_disable(); - local_bh_disable(); - - memset(¤t->thread.uw.fpsimd_state, 0, -@@ -906,7 +923,7 @@ void fpsimd_flush_thread(void) - - if (system_supports_sve()) { - clear_thread_flag(TIF_SVE); -- sve_free(current); -+ mem = sve_free_atomic(current); - - /* - * Reset the task vector length as required. -@@ -942,6 +959,8 @@ void fpsimd_flush_thread(void) - set_thread_flag(TIF_FOREIGN_FPSTATE); - - local_bh_enable(); -+ preempt_enable(); -+ kfree(mem); - } - - /* -@@ -953,9 +972,11 @@ void fpsimd_preserve_current_state(void) - if (!system_supports_fpsimd()) - return; - -+ preempt_disable(); - local_bh_disable(); - fpsimd_save(); - local_bh_enable(); -+ preempt_enable(); - } - - /* -@@ -1026,6 +1047,7 @@ void fpsimd_restore_current_state(void) - return; - } - -+ preempt_disable(); - local_bh_disable(); - - if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { -@@ -1034,6 +1056,7 @@ void fpsimd_restore_current_state(void) - } - - local_bh_enable(); -+ preempt_enable(); - } - - /* -@@ -1046,6 +1069,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) - if (WARN_ON(!system_supports_fpsimd())) - return; - -+ preempt_disable(); - local_bh_disable(); - - current->thread.uw.fpsimd_state = *state; -@@ -1058,6 +1082,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) - clear_thread_flag(TIF_FOREIGN_FPSTATE); - - local_bh_enable(); -+ preempt_enable(); - } - - /* -@@ -1104,6 +1129,7 @@ void kernel_neon_begin(void) - - BUG_ON(!may_use_simd()); - -+ preempt_disable(); - local_bh_disable(); - - __this_cpu_write(kernel_neon_busy, true); -@@ -1117,6 +1143,7 @@ void kernel_neon_begin(void) - preempt_disable(); - - local_bh_enable(); -+ preempt_enable(); - } - EXPORT_SYMBOL(kernel_neon_begin); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0214-kgdb-serial-Short-term-workaround.patch b/kernel/patches-4.19.x-rt/0214-kgdb-serial-Short-term-workaround.patch deleted file mode 100644 index 3b8d25fcf..000000000 --- a/kernel/patches-4.19.x-rt/0214-kgdb-serial-Short-term-workaround.patch +++ /dev/null @@ -1,85 +0,0 @@ -From c5d74bf61d048136c3ebff3f2d803d4eba474db7 Mon Sep 17 00:00:00 2001 -From: Jason Wessel -Date: Thu, 28 Jul 2011 12:42:23 -0500 -Subject: [PATCH 214/328] kgdb/serial: Short term workaround - -On 07/27/2011 04:37 PM, Thomas Gleixner wrote: -> - KGDB (not yet disabled) is reportedly unusable on -rt right now due -> to missing hacks in the console locking which I dropped on purpose. 
-> - -To work around this in the short term you can use this patch, in -addition to the clocksource watchdog patch that Thomas brewed up. - -Comments are welcome of course. Ultimately the right solution is to -change separation between the console and the HW to have a polled mode -+ work queue so as not to introduce any kind of latency. - -Thanks, -Jason. ---- - drivers/tty/serial/8250/8250_port.c | 3 +++ - include/linux/kdb.h | 2 ++ - kernel/debug/kdb/kdb_io.c | 2 ++ - 3 files changed, 7 insertions(+) - -diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c -index 6b1d46c1df3b..cd49a76be52a 100644 ---- a/drivers/tty/serial/8250/8250_port.c -+++ b/drivers/tty/serial/8250/8250_port.c -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -3241,6 +3242,8 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, - - if (port->sysrq || oops_in_progress) - locked = 0; -+ else if (in_kdb_printk()) -+ locked = spin_trylock_irqsave(&port->lock, flags); - else - spin_lock_irqsave(&port->lock, flags); - -diff --git a/include/linux/kdb.h b/include/linux/kdb.h -index 68bd88223417..e033b25b0b72 100644 ---- a/include/linux/kdb.h -+++ b/include/linux/kdb.h -@@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, - extern __printf(1, 2) int kdb_printf(const char *, ...); - typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); - -+#define in_kdb_printk() (kdb_trap_printk) - extern void kdb_init(int level); - - /* Access to kdb specific polling devices */ -@@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *, - extern int kdb_unregister(char *); - #else /* ! CONFIG_KGDB_KDB */ - static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } -+#define in_kdb_printk() (0) - static inline void kdb_init(int level) {} - static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen) { return 0; } -diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c -index 6a4b41484afe..197cb422f6e1 100644 ---- a/kernel/debug/kdb/kdb_io.c -+++ b/kernel/debug/kdb/kdb_io.c -@@ -857,9 +857,11 @@ int kdb_printf(const char *fmt, ...) - va_list ap; - int r; - -+ kdb_trap_printk++; - va_start(ap, fmt); - r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap); - va_end(ap); -+ kdb_trap_printk--; - - return r; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch b/kernel/patches-4.19.x-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch deleted file mode 100644 index c2ee02050..000000000 --- a/kernel/patches-4.19.x-rt/0215-sysfs-Add-sys-kernel-realtime-entry.patch +++ /dev/null @@ -1,53 +0,0 @@ -From f69088aeda4b17cc974e451f18f7f10d999a6f57 Mon Sep 17 00:00:00 2001 -From: Clark Williams -Date: Sat, 30 Jul 2011 21:55:53 -0500 -Subject: [PATCH 215/328] sysfs: Add /sys/kernel/realtime entry - -Add a /sys/kernel entry to indicate that the kernel is a -realtime kernel. - -Clark says that he needs this for udev rules, udev needs to evaluate -if its a PREEMPT_RT kernel a few thousand times and parsing uname -output is too slow or so. - -Are there better solutions? Should it exist and return 0 on !-rt? 
- -Signed-off-by: Clark Williams -Signed-off-by: Peter Zijlstra ---- - kernel/ksysfs.c | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c -index 46ba853656f6..9a23632b6294 100644 ---- a/kernel/ksysfs.c -+++ b/kernel/ksysfs.c -@@ -140,6 +140,15 @@ KERNEL_ATTR_RO(vmcoreinfo); - - #endif /* CONFIG_CRASH_CORE */ - -+#if defined(CONFIG_PREEMPT_RT_FULL) -+static ssize_t realtime_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "%d\n", 1); -+} -+KERNEL_ATTR_RO(realtime); -+#endif -+ - /* whether file capabilities are enabled */ - static ssize_t fscaps_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -@@ -230,6 +239,9 @@ static struct attribute * kernel_attrs[] = { - #ifndef CONFIG_TINY_RCU - &rcu_expedited_attr.attr, - &rcu_normal_attr.attr, -+#endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+ &realtime_attr.attr, - #endif - NULL - }; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0216-mm-rt-kmap_atomic-scheduling.patch b/kernel/patches-4.19.x-rt/0216-mm-rt-kmap_atomic-scheduling.patch deleted file mode 100644 index a5ee1563b..000000000 --- a/kernel/patches-4.19.x-rt/0216-mm-rt-kmap_atomic-scheduling.patch +++ /dev/null @@ -1,324 +0,0 @@ -From 5f53510a53551468cacc7dc39a067e4890082c6a Mon Sep 17 00:00:00 2001 -From: Peter Zijlstra -Date: Thu, 28 Jul 2011 10:43:51 +0200 -Subject: [PATCH 216/328] mm, rt: kmap_atomic scheduling - -In fact, with migrate_disable() existing one could play games with -kmap_atomic. You could save/restore the kmap_atomic slots on context -switch (if there are any in use of course), this should be esp easy now -that we have a kmap_atomic stack. - -Something like the below.. it wants replacing all the preempt_disable() -stuff with pagefault_disable() && migrate_disable() of course, but then -you can flip kmaps around like below. - -Signed-off-by: Peter Zijlstra -[dvhart@linux.intel.com: build fix] -Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins - -[tglx@linutronix.de: Get rid of the per cpu variable and store the idx - and the pte content right away in the task struct. - Shortens the context switch code. 
] ---- - arch/x86/kernel/process_32.c | 32 ++++++++++++++++++++++++++++++++ - arch/x86/mm/highmem_32.c | 13 ++++++++++--- - arch/x86/mm/iomap_32.c | 9 ++++++++- - include/linux/highmem.h | 31 +++++++++++++++++++++++++------ - include/linux/sched.h | 7 +++++++ - include/linux/uaccess.h | 2 ++ - mm/highmem.c | 6 ++++-- - 7 files changed, 88 insertions(+), 12 deletions(-) - -diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c -index 020efe0f9614..5d0c975559ad 100644 ---- a/arch/x86/kernel/process_32.c -+++ b/arch/x86/kernel/process_32.c -@@ -38,6 +38,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -205,6 +206,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) - } - EXPORT_SYMBOL_GPL(start_thread); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) -+{ -+ int i; -+ -+ /* -+ * Clear @prev's kmap_atomic mappings -+ */ -+ for (i = 0; i < prev_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ pte_t *ptep = kmap_pte - idx; -+ -+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); -+ } -+ /* -+ * Restore @next_p's kmap_atomic mappings -+ */ -+ for (i = 0; i < next_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ -+ if (!pte_none(next_p->kmap_pte[i])) -+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); -+ } -+} -+#else -+static inline void -+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } -+#endif -+ - - /* - * switch_to(x,y) should switch tasks from x to y. -@@ -274,6 +304,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) - - switch_to_extra(prev_p, next_p); - -+ switch_kmaps(prev_p, next_p); -+ - /* - * Leave lazy mode, flushing any hypercalls made here. - * This must be done before restoring TLS segments so -diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c -index 6d18b70ed5a9..f752724c22e8 100644 ---- a/arch/x86/mm/highmem_32.c -+++ b/arch/x86/mm/highmem_32.c -@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap); - */ - void *kmap_atomic_prot(struct page *page, pgprot_t prot) - { -+ pte_t pte = mk_pte(page, prot); - unsigned long vaddr; - int idx, type; - -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - - if (!PageHighMem(page)) -@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) - idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - BUG_ON(!pte_none(*(kmap_pte-idx))); -- set_pte(kmap_pte-idx, mk_pte(page, prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_pte(kmap_pte-idx, pte); - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr) - * is a bad idea also, in case the page changes cacheability - * attributes or becomes a protected page in a hypervisor. 
- */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - kpte_clear_flush(kmap_pte-idx, vaddr); - kmap_atomic_idx_pop(); - arch_flush_lazy_mmu_mode(); -@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr) - #endif - - pagefault_enable(); -- preempt_enable(); -+ preempt_enable_nort(); - } - EXPORT_SYMBOL(__kunmap_atomic); - -diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c -index b3294d36769d..d5a48210d0f6 100644 ---- a/arch/x86/mm/iomap_32.c -+++ b/arch/x86/mm/iomap_32.c -@@ -59,6 +59,7 @@ EXPORT_SYMBOL_GPL(iomap_free); - - void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) - { -+ pte_t pte = pfn_pte(pfn, prot); - unsigned long vaddr; - int idx, type; - -@@ -68,7 +69,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_pte(kmap_pte - idx, pte); - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -@@ -119,6 +123,9 @@ iounmap_atomic(void __iomem *kvaddr) - * is a bad idea also, in case the page changes cacheability - * attributes or becomes a protected page in a hypervisor. - */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - kpte_clear_flush(kmap_pte-idx, vaddr); - kmap_atomic_idx_pop(); - } -diff --git a/include/linux/highmem.h b/include/linux/highmem.h -index 0690679832d4..1ac89e4718bf 100644 ---- a/include/linux/highmem.h -+++ b/include/linux/highmem.h -@@ -66,7 +66,7 @@ static inline void kunmap(struct page *page) - - static inline void *kmap_atomic(struct page *page) - { -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - return page_address(page); - } -@@ -75,7 +75,7 @@ static inline void *kmap_atomic(struct page *page) - static inline void __kunmap_atomic(void *addr) - { - pagefault_enable(); -- preempt_enable(); -+ preempt_enable_nort(); - } - - #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) -@@ -87,32 +87,51 @@ static inline void __kunmap_atomic(void *addr) - - #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - -+#ifndef CONFIG_PREEMPT_RT_FULL - DECLARE_PER_CPU(int, __kmap_atomic_idx); -+#endif - - static inline int kmap_atomic_idx_push(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; - --#ifdef CONFIG_DEBUG_HIGHMEM -+# ifdef CONFIG_DEBUG_HIGHMEM - WARN_ON_ONCE(in_irq() && !irqs_disabled()); - BUG_ON(idx >= KM_TYPE_NR); --#endif -+# endif - return idx; -+#else -+ current->kmap_idx++; -+ BUG_ON(current->kmap_idx > KM_TYPE_NR); -+ return current->kmap_idx - 1; -+#endif - } - - static inline int kmap_atomic_idx(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - return __this_cpu_read(__kmap_atomic_idx) - 1; -+#else -+ return current->kmap_idx - 1; -+#endif - } - - static inline void kmap_atomic_idx_pop(void) - { --#ifdef CONFIG_DEBUG_HIGHMEM -+#ifndef CONFIG_PREEMPT_RT_FULL -+# ifdef CONFIG_DEBUG_HIGHMEM - int idx = __this_cpu_dec_return(__kmap_atomic_idx); - - BUG_ON(idx < 0); --#else -+# else - __this_cpu_dec(__kmap_atomic_idx); -+# endif -+#else -+ current->kmap_idx--; -+# ifdef CONFIG_DEBUG_HIGHMEM -+ BUG_ON(current->kmap_idx < 0); -+# endif - #endif - } - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 756fed8f5994..dc668524ccff 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -28,6 +28,7 @@ - 
#include - #include - #include -+#include - - /* task_struct member predeclarations (sorted alphabetically): */ - struct audit_context; -@@ -1214,6 +1215,12 @@ struct task_struct { - int softirq_nestcnt; - unsigned int softirqs_raised; - #endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 -+ int kmap_idx; -+ pte_t kmap_pte[KM_TYPE_NR]; -+# endif -+#endif - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; - #endif -diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h -index efe79c1cdd47..128a8489047d 100644 ---- a/include/linux/uaccess.h -+++ b/include/linux/uaccess.h -@@ -185,6 +185,7 @@ static __always_inline void pagefault_disabled_dec(void) - */ - static inline void pagefault_disable(void) - { -+ migrate_disable(); - pagefault_disabled_inc(); - /* - * make sure to have issued the store before a pagefault -@@ -201,6 +202,7 @@ static inline void pagefault_enable(void) - */ - barrier(); - pagefault_disabled_dec(); -+ migrate_enable(); - } - - /* -diff --git a/mm/highmem.c b/mm/highmem.c -index 59db3223a5d6..22aa3ddbd87b 100644 ---- a/mm/highmem.c -+++ b/mm/highmem.c -@@ -30,10 +30,11 @@ - #include - #include - -- -+#ifndef CONFIG_PREEMPT_RT_FULL - #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - DEFINE_PER_CPU(int, __kmap_atomic_idx); - #endif -+#endif - - /* - * Virtual_count is not a pure "count". -@@ -108,8 +109,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) - unsigned long totalhigh_pages __read_mostly; - EXPORT_SYMBOL(totalhigh_pages); - -- -+#ifndef CONFIG_PREEMPT_RT_FULL - EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); -+#endif - - unsigned int nr_free_highpages (void) - { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch b/kernel/patches-4.19.x-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch deleted file mode 100644 index 424232e40..000000000 --- a/kernel/patches-4.19.x-rt/0217-x86-highmem-Add-a-already-used-pte-check.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 4f31bac9ada3535606eaa6e9031a6a9e747afc5b Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 11 Mar 2013 17:09:55 +0100 -Subject: [PATCH 217/328] x86/highmem: Add a "already used pte" check - -This is a copy from kmap_atomic_prot(). - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/mm/iomap_32.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c -index d5a48210d0f6..c0ec8d430c02 100644 ---- a/arch/x86/mm/iomap_32.c -+++ b/arch/x86/mm/iomap_32.c -@@ -69,6 +69,8 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -+ WARN_ON(!pte_none(*(kmap_pte - idx))); -+ - #ifdef CONFIG_PREEMPT_RT_FULL - current->kmap_pte[type] = pte; - #endif --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch b/kernel/patches-4.19.x-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch deleted file mode 100644 index c4dfcaf1b..000000000 --- a/kernel/patches-4.19.x-rt/0218-arm-highmem-Flush-tlb-on-unmap.patch +++ /dev/null @@ -1,33 +0,0 @@ -From c0391198894b60a1860f751474ac15e89392d396 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 11 Mar 2013 21:37:27 +0100 -Subject: [PATCH 218/328] arm/highmem: Flush tlb on unmap - -The tlb should be flushed on unmap and thus make the mapping entry -invalid. 
This is only done in the non-debug case which does not look -right. - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm/mm/highmem.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c -index d02f8187b1cc..eb4b225d28c9 100644 ---- a/arch/arm/mm/highmem.c -+++ b/arch/arm/mm/highmem.c -@@ -112,10 +112,10 @@ void __kunmap_atomic(void *kvaddr) - __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); - #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(vaddr != __fix_to_virt(idx)); -- set_fixmap_pte(idx, __pte(0)); - #else - (void) idx; /* to kill a warning */ - #endif -+ set_fixmap_pte(idx, __pte(0)); - kmap_atomic_idx_pop(); - } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { - /* this address was obtained through kmap_high_get() */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0219-arm-Enable-highmem-for-rt.patch b/kernel/patches-4.19.x-rt/0219-arm-Enable-highmem-for-rt.patch deleted file mode 100644 index cd32ff1c4..000000000 --- a/kernel/patches-4.19.x-rt/0219-arm-Enable-highmem-for-rt.patch +++ /dev/null @@ -1,183 +0,0 @@ -From 9f06896a55a5320c0037ed6d57ccc188f39b46d9 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 13 Feb 2013 11:03:11 +0100 -Subject: [PATCH 219/328] arm: Enable highmem for rt - -fixup highmem for ARM. - -Signed-off-by: Thomas Gleixner ---- - arch/arm/include/asm/switch_to.h | 8 +++++ - arch/arm/mm/highmem.c | 56 +++++++++++++++++++++++++++----- - include/linux/highmem.h | 1 + - 3 files changed, 57 insertions(+), 8 deletions(-) - -diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h -index d3e937dcee4d..6ab96a2ce1f8 100644 ---- a/arch/arm/include/asm/switch_to.h -+++ b/arch/arm/include/asm/switch_to.h -@@ -4,6 +4,13 @@ - - #include - -+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM -+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); -+#else -+static inline void -+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } -+#endif -+ - /* - * For v7 SMP cores running a preemptible kernel we may be pre-empted - * during a TLB maintenance operation, so execute an inner-shareable dsb -@@ -26,6 +33,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info - #define switch_to(prev,next,last) \ - do { \ - __complete_pending_tlbi(); \ -+ switch_kmaps(prev, next); \ - last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ - } while (0) - -diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c -index eb4b225d28c9..542692dbd40a 100644 ---- a/arch/arm/mm/highmem.c -+++ b/arch/arm/mm/highmem.c -@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr) - return *ptep; - } - -+static unsigned int fixmap_idx(int type) -+{ -+ return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+} -+ - void *kmap(struct page *page) - { - might_sleep(); -@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap); - - void *kmap_atomic(struct page *page) - { -+ pte_t pte = mk_pte(page, kmap_prot); - unsigned int idx; - unsigned long vaddr; - void *kmap; - int type; - -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); -@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page) - - type = kmap_atomic_idx_push(); - -- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+ idx = fixmap_idx(type); - vaddr = __fix_to_virt(idx); - #ifdef CONFIG_DEBUG_HIGHMEM - /* -@@ -93,7 +99,10 @@ void 
*kmap_atomic(struct page *page) - * in place, so the contained TLB flush ensures the TLB is updated - * with the new mapping. - */ -- set_fixmap_pte(idx, mk_pte(page, kmap_prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_fixmap_pte(idx, pte); - - return (void *)vaddr; - } -@@ -106,10 +115,13 @@ void __kunmap_atomic(void *kvaddr) - - if (kvaddr >= (void *)FIXADDR_START) { - type = kmap_atomic_idx(); -- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+ idx = fixmap_idx(type); - - if (cache_is_vivt()) - __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(vaddr != __fix_to_virt(idx)); - #else -@@ -122,28 +134,56 @@ void __kunmap_atomic(void *kvaddr) - kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); - } - pagefault_enable(); -- preempt_enable(); -+ preempt_enable_nort(); - } - EXPORT_SYMBOL(__kunmap_atomic); - - void *kmap_atomic_pfn(unsigned long pfn) - { -+ pte_t pte = pfn_pte(pfn, kmap_prot); - unsigned long vaddr; - int idx, type; - struct page *page = pfn_to_page(pfn); - -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); - - type = kmap_atomic_idx_push(); -- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+ idx = fixmap_idx(type); - vaddr = __fix_to_virt(idx); - #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(get_fixmap_pte(vaddr))); - #endif -- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_fixmap_pte(idx, pte); - - return (void *)vaddr; - } -+#if defined CONFIG_PREEMPT_RT_FULL -+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) -+{ -+ int i; -+ -+ /* -+ * Clear @prev's kmap_atomic mappings -+ */ -+ for (i = 0; i < prev_p->kmap_idx; i++) { -+ int idx = fixmap_idx(i); -+ -+ set_fixmap_pte(idx, __pte(0)); -+ } -+ /* -+ * Restore @next_p's kmap_atomic mappings -+ */ -+ for (i = 0; i < next_p->kmap_idx; i++) { -+ int idx = fixmap_idx(i); -+ -+ if (!pte_none(next_p->kmap_pte[i])) -+ set_fixmap_pte(idx, next_p->kmap_pte[i]); -+ } -+} -+#endif -diff --git a/include/linux/highmem.h b/include/linux/highmem.h -index 1ac89e4718bf..eaa2ef9bc10e 100644 ---- a/include/linux/highmem.h -+++ b/include/linux/highmem.h -@@ -8,6 +8,7 @@ - #include - #include - #include -+#include - - #include - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0220-scsi-fcoe-Make-RT-aware.patch b/kernel/patches-4.19.x-rt/0220-scsi-fcoe-Make-RT-aware.patch deleted file mode 100644 index 32d953b83..000000000 --- a/kernel/patches-4.19.x-rt/0220-scsi-fcoe-Make-RT-aware.patch +++ /dev/null @@ -1,115 +0,0 @@ -From 340c12c894a65dfd465a251f6f7f795746c803a0 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sat, 12 Nov 2011 14:00:48 +0100 -Subject: [PATCH 220/328] scsi/fcoe: Make RT aware. - -Do not disable preemption while taking sleeping locks. All user look safe -for migrate_diable() only. 
- -Signed-off-by: Thomas Gleixner ---- - drivers/scsi/fcoe/fcoe.c | 16 ++++++++-------- - drivers/scsi/fcoe/fcoe_ctlr.c | 4 ++-- - drivers/scsi/libfc/fc_exch.c | 4 ++-- - 3 files changed, 12 insertions(+), 12 deletions(-) - -diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c -index 6768b2e8148a..c20f51af6bdf 100644 ---- a/drivers/scsi/fcoe/fcoe.c -+++ b/drivers/scsi/fcoe/fcoe.c -@@ -1459,11 +1459,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, - static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) - { - struct fcoe_percpu_s *fps; -- int rc; -+ int rc, cpu = get_cpu_light(); - -- fps = &get_cpu_var(fcoe_percpu); -+ fps = &per_cpu(fcoe_percpu, cpu); - rc = fcoe_get_paged_crc_eof(skb, tlen, fps); -- put_cpu_var(fcoe_percpu); -+ put_cpu_light(); - - return rc; - } -@@ -1650,11 +1650,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport, - return 0; - } - -- stats = per_cpu_ptr(lport->stats, get_cpu()); -+ stats = per_cpu_ptr(lport->stats, get_cpu_light()); - stats->InvalidCRCCount++; - if (stats->InvalidCRCCount < 5) - printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); -- put_cpu(); -+ put_cpu_light(); - return -EINVAL; - } - -@@ -1697,7 +1697,7 @@ static void fcoe_recv_frame(struct sk_buff *skb) - */ - hp = (struct fcoe_hdr *) skb_network_header(skb); - -- stats = per_cpu_ptr(lport->stats, get_cpu()); -+ stats = per_cpu_ptr(lport->stats, get_cpu_light()); - if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { - if (stats->ErrorFrames < 5) - printk(KERN_WARNING "fcoe: FCoE version " -@@ -1729,13 +1729,13 @@ static void fcoe_recv_frame(struct sk_buff *skb) - goto drop; - - if (!fcoe_filter_frames(lport, fp)) { -- put_cpu(); -+ put_cpu_light(); - fc_exch_recv(lport, fp); - return; - } - drop: - stats->ErrorFrames++; -- put_cpu(); -+ put_cpu_light(); - kfree_skb(skb); - } - -diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c -index 24cbd0a2cc69..ccf60801fe9d 100644 ---- a/drivers/scsi/fcoe/fcoe_ctlr.c -+++ b/drivers/scsi/fcoe/fcoe_ctlr.c -@@ -838,7 +838,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) - - INIT_LIST_HEAD(&del_list); - -- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); -+ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); - - list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { - deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; -@@ -874,7 +874,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) - sel_time = fcf->time; - } - } -- put_cpu(); -+ put_cpu_light(); - - list_for_each_entry_safe(fcf, next, &del_list, list) { - /* Removes fcf from current list */ -diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c -index 6ba257cbc6d9..d2398a19f84c 100644 ---- a/drivers/scsi/libfc/fc_exch.c -+++ b/drivers/scsi/libfc/fc_exch.c -@@ -833,10 +833,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, - } - memset(ep, 0, sizeof(*ep)); - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - pool = per_cpu_ptr(mp->pool, cpu); - spin_lock_bh(&pool->lock); -- put_cpu(); -+ put_cpu_light(); - - /* peek cache of free slot */ - if (pool->left != FC_XID_UNKNOWN) { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch b/kernel/patches-4.19.x-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch deleted file mode 100644 index 383f2ae4e..000000000 --- a/kernel/patches-4.19.x-rt/0221-x86-crypto-Reduce-preempt-disabled-regions.patch +++ /dev/null @@ -1,117 +0,0 @@ -From 
9fcdeeff46696ee50f5a524dd11bc61aad38cfc1 Mon Sep 17 00:00:00 2001 -From: Peter Zijlstra -Date: Mon, 14 Nov 2011 18:19:27 +0100 -Subject: [PATCH 221/328] x86: crypto: Reduce preempt disabled regions - -Restrict the preempt disabled regions to the actual floating point -operations and enable preemption for the administrative actions. - -This is necessary on RT to avoid that kfree and other operations are -called with preemption disabled. - -Reported-and-tested-by: Carsten Emde -Signed-off-by: Peter Zijlstra - -Signed-off-by: Thomas Gleixner ---- - arch/x86/crypto/aesni-intel_glue.c | 22 ++++++++++++---------- - 1 file changed, 12 insertions(+), 10 deletions(-) - -diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c -index 917f25e4d0a8..58d8c03fc32d 100644 ---- a/arch/x86/crypto/aesni-intel_glue.c -+++ b/arch/x86/crypto/aesni-intel_glue.c -@@ -434,14 +434,14 @@ static int ecb_encrypt(struct skcipher_request *req) - - err = skcipher_walk_virt(&walk, req, true); - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = skcipher_walk_done(&walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -456,14 +456,14 @@ static int ecb_decrypt(struct skcipher_request *req) - - err = skcipher_walk_virt(&walk, req, true); - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = skcipher_walk_done(&walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -478,14 +478,14 @@ static int cbc_encrypt(struct skcipher_request *req) - - err = skcipher_walk_virt(&walk, req, true); - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = skcipher_walk_done(&walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -500,14 +500,14 @@ static int cbc_decrypt(struct skcipher_request *req) - - err = skcipher_walk_virt(&walk, req, true); - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = skcipher_walk_done(&walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -557,18 +557,20 @@ static int ctr_crypt(struct skcipher_request *req) - - err = skcipher_walk_virt(&walk, req, true); - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { -+ kernel_fpu_begin(); - aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = skcipher_walk_done(&walk, nbytes); - } - if (walk.nbytes) { -+ kernel_fpu_begin(); - ctr_crypt_final(ctx, &walk); -+ kernel_fpu_end(); - err = skcipher_walk_done(&walk, 0); - } -- kernel_fpu_end(); - - return err; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch b/kernel/patches-4.19.x-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch deleted file mode 100644 index bd809109f..000000000 --- 
a/kernel/patches-4.19.x-rt/0222-crypto-Reduce-preempt-disabled-regions-more-algos.patch +++ /dev/null @@ -1,262 +0,0 @@ -From 6cb63cbf25a36339685aae48f066fc80cbf5cd7f Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 21 Feb 2014 17:24:04 +0100 -Subject: [PATCH 222/328] crypto: Reduce preempt disabled regions, more algos - -Don Estabrook reported -| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() -| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2462 migrate_enable+0x17b/0x200() -| kernel: WARNING: CPU: 3 PID: 865 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100() - -and his backtrace showed some crypto functions which looked fine. - -The problem is the following sequence: - -glue_xts_crypt_128bit() -{ - blkcipher_walk_virt(); /* normal migrate_disable() */ - - glue_fpu_begin(); /* get atomic */ - - while (nbytes) { - __glue_xts_crypt_128bit(); - blkcipher_walk_done(); /* with nbytes = 0, migrate_enable() - * while we are atomic */ - }; - glue_fpu_end() /* no longer atomic */ -} - -and this is why the counter get out of sync and the warning is printed. -The other problem is that we are non-preemptible between -glue_fpu_begin() and glue_fpu_end() and the latency grows. To fix this, -I shorten the FPU off region and ensure blkcipher_walk_done() is called -with preemption enabled. This might hurt the performance because we now -enable/disable the FPU state more often but we gain lower latency and -the bug is gone. - - -Reported-by: Don Estabrook -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/crypto/cast5_avx_glue.c | 21 +++++++++------------ - arch/x86/crypto/glue_helper.c | 31 ++++++++++++++++--------------- - 2 files changed, 25 insertions(+), 27 deletions(-) - -diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c -index 41034745d6a2..d4bf7fc02ee7 100644 ---- a/arch/x86/crypto/cast5_avx_glue.c -+++ b/arch/x86/crypto/cast5_avx_glue.c -@@ -61,7 +61,7 @@ static inline void cast5_fpu_end(bool fpu_enabled) - - static int ecb_crypt(struct skcipher_request *req, bool enc) - { -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; -@@ -76,7 +76,7 @@ static int ecb_crypt(struct skcipher_request *req, bool enc) - u8 *wsrc = walk.src.virt.addr; - u8 *wdst = walk.dst.virt.addr; - -- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, &walk, nbytes); - - /* Process multi-block batch */ - if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { -@@ -105,10 +105,9 @@ static int ecb_crypt(struct skcipher_request *req, bool enc) - } while (nbytes >= bsize); - - done: -+ cast5_fpu_end(fpu_enabled); - err = skcipher_walk_done(&walk, nbytes); - } -- -- cast5_fpu_end(fpu_enabled); - return err; - } - -@@ -212,7 +211,7 @@ static int cbc_decrypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct skcipher_walk walk; - unsigned int nbytes; - int err; -@@ -220,12 +219,11 @@ static int cbc_decrypt(struct skcipher_request *req) - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes)) { -- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, &walk, nbytes); - nbytes = __cbc_decrypt(ctx, &walk); -+ 
cast5_fpu_end(fpu_enabled); - err = skcipher_walk_done(&walk, nbytes); - } -- -- cast5_fpu_end(fpu_enabled); - return err; - } - -@@ -292,7 +290,7 @@ static int ctr_crypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm); -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct skcipher_walk walk; - unsigned int nbytes; - int err; -@@ -300,13 +298,12 @@ static int ctr_crypt(struct skcipher_request *req) - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { -- fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, &walk, nbytes); - nbytes = __ctr_crypt(&walk, ctx); -+ cast5_fpu_end(fpu_enabled); - err = skcipher_walk_done(&walk, nbytes); - } - -- cast5_fpu_end(fpu_enabled); -- - if (walk.nbytes) { - ctr_crypt_final(&walk, ctx); - err = skcipher_walk_done(&walk, 0); -diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c -index a78ef99a9981..dac489a1c4da 100644 ---- a/arch/x86/crypto/glue_helper.c -+++ b/arch/x86/crypto/glue_helper.c -@@ -38,7 +38,7 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, - void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; -- bool fpu_enabled = false; -+ bool fpu_enabled; - unsigned int nbytes; - int err; - -@@ -51,7 +51,7 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, - unsigned int i; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- &walk, fpu_enabled, nbytes); -+ &walk, false, nbytes); - for (i = 0; i < gctx->num_funcs; i++) { - func_bytes = bsize * gctx->funcs[i].num_blocks; - -@@ -69,10 +69,9 @@ int glue_ecb_req_128bit(const struct common_glue_ctx *gctx, - if (nbytes < bsize) - break; - } -+ glue_fpu_end(fpu_enabled); - err = skcipher_walk_done(&walk, nbytes); - } -- -- glue_fpu_end(fpu_enabled); - return err; - } - EXPORT_SYMBOL_GPL(glue_ecb_req_128bit); -@@ -115,7 +114,7 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, - void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; -- bool fpu_enabled = false; -+ bool fpu_enabled; - unsigned int nbytes; - int err; - -@@ -129,7 +128,7 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, - u128 last_iv; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- &walk, fpu_enabled, nbytes); -+ &walk, false, nbytes); - /* Start of the last block. 
*/ - src += nbytes / bsize - 1; - dst += nbytes / bsize - 1; -@@ -161,10 +160,10 @@ int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx, - done: - u128_xor(dst, dst, (u128 *)walk.iv); - *(u128 *)walk.iv = last_iv; -+ glue_fpu_end(fpu_enabled); - err = skcipher_walk_done(&walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); - return err; - } - EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit); -@@ -175,7 +174,7 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, - void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; -- bool fpu_enabled = false; -+ bool fpu_enabled; - unsigned int nbytes; - int err; - -@@ -189,7 +188,7 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, - le128 ctrblk; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- &walk, fpu_enabled, nbytes); -+ &walk, false, nbytes); - - be128_to_le128(&ctrblk, (be128 *)walk.iv); - -@@ -213,11 +212,10 @@ int glue_ctr_req_128bit(const struct common_glue_ctx *gctx, - } - - le128_to_be128((be128 *)walk.iv, &ctrblk); -+ glue_fpu_end(fpu_enabled); - err = skcipher_walk_done(&walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); -- - if (nbytes) { - le128 ctrblk; - u128 tmp; -@@ -278,7 +276,7 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx, - { - const unsigned int bsize = 128 / 8; - struct skcipher_walk walk; -- bool fpu_enabled = false; -+ bool fpu_enabled; - unsigned int nbytes; - int err; - -@@ -289,21 +287,24 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx, - - /* set minimum length to bsize, for tweak_fn */ - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- &walk, fpu_enabled, -+ &walk, false, - nbytes < bsize ? bsize : nbytes); - - /* calculate first value of T */ - tweak_fn(tweak_ctx, walk.iv, walk.iv); - - while (nbytes) { -+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -+ &walk, fpu_enabled, -+ nbytes < bsize ? bsize : nbytes); - nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk); - -+ glue_fpu_end(fpu_enabled); -+ fpu_enabled = false; - err = skcipher_walk_done(&walk, nbytes); - nbytes = walk.nbytes; - } - -- glue_fpu_end(fpu_enabled); -- - return err; - } - EXPORT_SYMBOL_GPL(glue_xts_req_128bit); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0223-crypto-limit-more-FPU-enabled-sections.patch b/kernel/patches-4.19.x-rt/0223-crypto-limit-more-FPU-enabled-sections.patch deleted file mode 100644 index 3071bfa9d..000000000 --- a/kernel/patches-4.19.x-rt/0223-crypto-limit-more-FPU-enabled-sections.patch +++ /dev/null @@ -1,107 +0,0 @@ -From c69a6e5b35d956f1788a112244952e84fc27296a Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 30 Nov 2017 13:40:10 +0100 -Subject: [PATCH 223/328] crypto: limit more FPU-enabled sections -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Those crypto drivers use SSE/AVX/… for their crypto work and in order to -do so in kernel they need to enable the "FPU" in kernel mode which -disables preemption. -There are two problems with the way they are used: -- the while loop which processes X bytes may create latency spikes and - should be avoided or limited. -- the cipher-walk-next part may allocate/free memory and may use - kmap_atomic(). - -The whole kernel_fpu_begin()/end() processing isn't probably that cheap. -It most likely makes sense to process as much of those as possible in one -go. The new *_fpu_sched_rt() schedules only if a RT task is pending. 
- -Probably we should measure the performance those ciphers in pure SW -mode and with this optimisations to see if it makes sense to keep them -for RT. - -This kernel_fpu_resched() makes the code more preemptible which might hurt -performance. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/crypto/chacha20_glue.c | 9 +++++---- - arch/x86/include/asm/fpu/api.h | 1 + - arch/x86/kernel/fpu/core.c | 12 ++++++++++++ - 3 files changed, 18 insertions(+), 4 deletions(-) - -diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c -index dce7c5d39c2f..6194160b7fbc 100644 ---- a/arch/x86/crypto/chacha20_glue.c -+++ b/arch/x86/crypto/chacha20_glue.c -@@ -81,23 +81,24 @@ static int chacha20_simd(struct skcipher_request *req) - - crypto_chacha20_init(state, ctx, walk.iv); - -- kernel_fpu_begin(); -- - while (walk.nbytes >= CHACHA20_BLOCK_SIZE) { -+ kernel_fpu_begin(); -+ - chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, - rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE)); -+ kernel_fpu_end(); - err = skcipher_walk_done(&walk, - walk.nbytes % CHACHA20_BLOCK_SIZE); - } - - if (walk.nbytes) { -+ kernel_fpu_begin(); - chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes); -+ kernel_fpu_end(); - err = skcipher_walk_done(&walk, 0); - } - -- kernel_fpu_end(); -- - return err; - } - -diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h -index b56d504af654..e51c7094075d 100644 ---- a/arch/x86/include/asm/fpu/api.h -+++ b/arch/x86/include/asm/fpu/api.h -@@ -20,6 +20,7 @@ - */ - extern void kernel_fpu_begin(void); - extern void kernel_fpu_end(void); -+extern void kernel_fpu_resched(void); - extern bool irq_fpu_usable(void); - - /* -diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c -index 2e5003fef51a..768c53767bb2 100644 ---- a/arch/x86/kernel/fpu/core.c -+++ b/arch/x86/kernel/fpu/core.c -@@ -136,6 +136,18 @@ void kernel_fpu_end(void) - } - EXPORT_SYMBOL_GPL(kernel_fpu_end); - -+void kernel_fpu_resched(void) -+{ -+ WARN_ON_FPU(!this_cpu_read(in_kernel_fpu)); -+ -+ if (should_resched(PREEMPT_OFFSET)) { -+ kernel_fpu_end(); -+ cond_resched(); -+ kernel_fpu_begin(); -+ } -+} -+EXPORT_SYMBOL_GPL(kernel_fpu_resched); -+ - /* - * Save the FPU state (mark it for reload if necessary): - * --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch b/kernel/patches-4.19.x-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch deleted file mode 100644 index 4caace242..000000000 --- a/kernel/patches-4.19.x-rt/0224-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch +++ /dev/null @@ -1,82 +0,0 @@ -From 08367a7b181f94abaeb6bb4c827db639c56e8ea6 Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Wed, 11 Jul 2018 17:14:47 +0200 -Subject: [PATCH 224/328] crypto: scompress - serialize RT percpu scratch - buffer access with a local lock - -| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:974 -| in_atomic(): 1, irqs_disabled(): 0, pid: 1401, name: cryptomgr_test -| Preemption disabled at: -| [] scomp_acomp_comp_decomp+0x34/0x1a0 -| CPU: 21 PID: 1401 Comm: cryptomgr_test Tainted: G W 4.16.18-rt9-rt #1 -| Hardware name: www.cavium.com crb-1s/crb-1s, BIOS 0.3 Apr 25 2017 -| Call trace: -| dump_backtrace+0x0/0x1c8 -| show_stack+0x24/0x30 -| dump_stack+0xac/0xe8 -| ___might_sleep+0x124/0x188 -| rt_spin_lock+0x40/0x88 -| zip_load_instr+0x44/0x170 [thunderx_zip] -| zip_deflate+0x184/0x378 
[thunderx_zip] -| zip_compress+0xb0/0x130 [thunderx_zip] -| zip_scomp_compress+0x48/0x60 [thunderx_zip] -| scomp_acomp_comp_decomp+0xd8/0x1a0 -| scomp_acomp_compress+0x24/0x30 -| test_acomp+0x15c/0x558 -| alg_test_comp+0xc0/0x128 -| alg_test.part.6+0x120/0x2c0 -| alg_test+0x6c/0xa0 -| cryptomgr_test+0x50/0x58 -| kthread+0x134/0x138 -| ret_from_fork+0x10/0x18 - -Mainline disables preemption to serialize percpu scratch buffer access, -causing the splat above. Serialize with a local lock for RT instead. - -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - crypto/scompress.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/crypto/scompress.c b/crypto/scompress.c -index 968bbcf65c94..c2f0077e0801 100644 ---- a/crypto/scompress.c -+++ b/crypto/scompress.c -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -34,6 +35,7 @@ static void * __percpu *scomp_src_scratches; - static void * __percpu *scomp_dst_scratches; - static int scomp_scratch_users; - static DEFINE_MUTEX(scomp_lock); -+static DEFINE_LOCAL_IRQ_LOCK(scomp_scratches_lock); - - #ifdef CONFIG_NET - static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg) -@@ -146,7 +148,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) - void **tfm_ctx = acomp_tfm_ctx(tfm); - struct crypto_scomp *scomp = *tfm_ctx; - void **ctx = acomp_request_ctx(req); -- const int cpu = get_cpu(); -+ const int cpu = local_lock_cpu(scomp_scratches_lock); - u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu); - u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu); - int ret; -@@ -181,7 +183,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) - 1); - } - out: -- put_cpu(); -+ local_unlock_cpu(scomp_scratches_lock); - return ret; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch b/kernel/patches-4.19.x-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch deleted file mode 100644 index 8178995cb..000000000 --- a/kernel/patches-4.19.x-rt/0225-crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch +++ /dev/null @@ -1,84 +0,0 @@ -From 850e714cfa9823eea1841d36cb29b28434d6ac2d Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 26 Jul 2018 18:52:00 +0200 -Subject: [PATCH 225/328] crypto: cryptd - add a lock instead - preempt_disable/local_bh_disable - -cryptd has a per-CPU lock which protected with local_bh_disable() and -preempt_disable(). -Add an explicit spin_lock to make the locking context more obvious and -visible to lockdep. Since it is a per-CPU lock, there should be no lock -contention on the actual spinlock. -There is a small race-window where we could be migrated to another CPU -after the cpu_queue has been obtain. This is not a problem because the -actual ressource is protected by the spinlock. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - crypto/cryptd.c | 19 +++++++++---------- - 1 file changed, 9 insertions(+), 10 deletions(-) - -diff --git a/crypto/cryptd.c b/crypto/cryptd.c -index e0c8e907b086..e079f9a70201 100644 ---- a/crypto/cryptd.c -+++ b/crypto/cryptd.c -@@ -39,6 +39,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); - struct cryptd_cpu_queue { - struct crypto_queue queue; - struct work_struct work; -+ spinlock_t qlock; - }; - - struct cryptd_queue { -@@ -117,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue, - cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); - crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); - INIT_WORK(&cpu_queue->work, cryptd_queue_worker); -+ spin_lock_init(&cpu_queue->qlock); - } - pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen); - return 0; -@@ -141,8 +143,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, - struct cryptd_cpu_queue *cpu_queue; - atomic_t *refcnt; - -- cpu = get_cpu(); -- cpu_queue = this_cpu_ptr(queue->cpu_queue); -+ cpu_queue = raw_cpu_ptr(queue->cpu_queue); -+ spin_lock_bh(&cpu_queue->qlock); -+ cpu = smp_processor_id(); -+ - err = crypto_enqueue_request(&cpu_queue->queue, request); - - refcnt = crypto_tfm_ctx(request->tfm); -@@ -158,7 +162,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, - atomic_inc(refcnt); - - out_put_cpu: -- put_cpu(); -+ spin_unlock_bh(&cpu_queue->qlock); - - return err; - } -@@ -174,16 +178,11 @@ static void cryptd_queue_worker(struct work_struct *work) - cpu_queue = container_of(work, struct cryptd_cpu_queue, work); - /* - * Only handle one request at a time to avoid hogging crypto workqueue. -- * preempt_disable/enable is used to prevent being preempted by -- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent -- * cryptd_enqueue_request() being accessed from software interrupts. - */ -- local_bh_disable(); -- preempt_disable(); -+ spin_lock_bh(&cpu_queue->qlock); - backlog = crypto_get_backlog(&cpu_queue->queue); - req = crypto_dequeue_request(&cpu_queue->queue); -- preempt_enable(); -- local_bh_enable(); -+ spin_unlock_bh(&cpu_queue->qlock); - - if (!req) - return; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch b/kernel/patches-4.19.x-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch deleted file mode 100644 index 3b6048164..000000000 --- a/kernel/patches-4.19.x-rt/0226-panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch +++ /dev/null @@ -1,33 +0,0 @@ -From faa87d948486f3da759dc841ad7e5d983e94691a Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 14 Jul 2015 14:26:34 +0200 -Subject: [PATCH 226/328] panic: skip get_random_bytes for RT_FULL in - init_oops_id - -Disable on -RT. If this is invoked from irq-context we will have problems -to acquire the sleeping lock. 
- -Signed-off-by: Thomas Gleixner ---- - kernel/panic.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/kernel/panic.c b/kernel/panic.c -index 8138a676fb7d..fa114be5c30f 100644 ---- a/kernel/panic.c -+++ b/kernel/panic.c -@@ -480,9 +480,11 @@ static u64 oops_id; - - static int init_oops_id(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - if (!oops_id) - get_random_bytes(&oops_id, sizeof(oops_id)); - else -+#endif - oops_id++; - - return 0; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch b/kernel/patches-4.19.x-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch deleted file mode 100644 index 7265281ae..000000000 --- a/kernel/patches-4.19.x-rt/0227-x86-stackprotector-Avoid-random-pool-on-rt.patch +++ /dev/null @@ -1,50 +0,0 @@ -From fdddc5f9de75c797965fbb4c41826941313f97e3 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 16 Dec 2010 14:25:18 +0100 -Subject: [PATCH 227/328] x86: stackprotector: Avoid random pool on rt - -CPU bringup calls into the random pool to initialize the stack -canary. During boot that works nicely even on RT as the might sleep -checks are disabled. During CPU hotplug the might sleep checks -trigger. Making the locks in random raw is a major PITA, so avoid the -call on RT is the only sensible solution. This is basically the same -randomness which we get during boot where the random pool has no -entropy and we rely on the TSC randomnness. - -Reported-by: Carsten Emde -Signed-off-by: Thomas Gleixner ---- - arch/x86/include/asm/stackprotector.h | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - -diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h -index 8ec97a62c245..7bc85841fc56 100644 ---- a/arch/x86/include/asm/stackprotector.h -+++ b/arch/x86/include/asm/stackprotector.h -@@ -60,7 +60,7 @@ - */ - static __always_inline void boot_init_stack_canary(void) - { -- u64 canary; -+ u64 uninitialized_var(canary); - u64 tsc; - - #ifdef CONFIG_X86_64 -@@ -71,8 +71,14 @@ static __always_inline void boot_init_stack_canary(void) - * of randomness. The TSC only matters for very early init, - * there it already has some randomness on most systems. Later - * on during the bootup the random pool has true entropy too. -+ * For preempt-rt we need to weaken the randomness a bit, as -+ * we can't call into the random generator from atomic context -+ * due to locking constraints. We just leave canary -+ * uninitialized and use the TSC based randomness on top of it. - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - get_random_bytes(&canary, sizeof(canary)); -+#endif - tsc = rdtsc(); - canary += tsc + (tsc << 32UL); - canary &= CANARY_MASK; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0228-random-Make-it-work-on-rt.patch b/kernel/patches-4.19.x-rt/0228-random-Make-it-work-on-rt.patch deleted file mode 100644 index e95dd890b..000000000 --- a/kernel/patches-4.19.x-rt/0228-random-Make-it-work-on-rt.patch +++ /dev/null @@ -1,166 +0,0 @@ -From 5f480f8b688aa96212f301664644d53b0d3a3e95 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 21 Aug 2012 20:38:50 +0200 -Subject: [PATCH 228/328] random: Make it work on rt - -Delegate the random insertion to the forced threaded interrupt -handler. Store the return IP of the hard interrupt handler in the irq -descriptor and feed it into the random generator as a source of -entropy. 
- -Signed-off-by: Thomas Gleixner ---- - drivers/char/random.c | 11 +++++------ - drivers/hv/hv.c | 4 +++- - drivers/hv/vmbus_drv.c | 4 +++- - include/linux/irqdesc.h | 1 + - include/linux/random.h | 2 +- - kernel/irq/handle.c | 8 +++++++- - kernel/irq/manage.c | 6 ++++++ - 7 files changed, 26 insertions(+), 10 deletions(-) - -diff --git a/drivers/char/random.c b/drivers/char/random.c -index 28b110cd3977..d15dd08a92e2 100644 ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -1232,28 +1232,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) - return *ptr; - } - --void add_interrupt_randomness(int irq, int irq_flags) -+void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) - { - struct entropy_store *r; - struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); -- struct pt_regs *regs = get_irq_regs(); - unsigned long now = jiffies; - cycles_t cycles = random_get_entropy(); - __u32 c_high, j_high; -- __u64 ip; - unsigned long seed; - int credit = 0; - - if (cycles == 0) -- cycles = get_reg(fast_pool, regs); -+ cycles = get_reg(fast_pool, NULL); - c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; - j_high = (sizeof(now) > 4) ? now >> 32 : 0; - fast_pool->pool[0] ^= cycles ^ j_high ^ irq; - fast_pool->pool[1] ^= now ^ c_high; -- ip = regs ? instruction_pointer(regs) : _RET_IP_; -+ if (!ip) -+ ip = _RET_IP_; - fast_pool->pool[2] ^= ip; - fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : -- get_reg(fast_pool, regs); -+ get_reg(fast_pool, NULL); - - fast_mix(fast_pool); - add_interrupt_bench(cycles); -diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c -index 12bc9fa21111..278f03f50147 100644 ---- a/drivers/hv/hv.c -+++ b/drivers/hv/hv.c -@@ -112,10 +112,12 @@ int hv_post_message(union hv_connection_id connection_id, - static void hv_stimer0_isr(void) - { - struct hv_per_cpu_context *hv_cpu; -+ struct pt_regs *regs = get_irq_regs(); -+ u64 ip = regs ? instruction_pointer(regs) : 0; - - hv_cpu = this_cpu_ptr(hv_context.cpu_context); - hv_cpu->clk_evt->event_handler(hv_cpu->clk_evt); -- add_interrupt_randomness(stimer0_vector, 0); -+ add_interrupt_randomness(stimer0_vector, 0, ip); - } - - static int hv_ce_set_next_event(unsigned long delta, -diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c -index 9aa18f387a34..39aaa14993cc 100644 ---- a/drivers/hv/vmbus_drv.c -+++ b/drivers/hv/vmbus_drv.c -@@ -1042,6 +1042,8 @@ static void vmbus_isr(void) - void *page_addr = hv_cpu->synic_event_page; - struct hv_message *msg; - union hv_synic_event_flags *event; -+ struct pt_regs *regs = get_irq_regs(); -+ u64 ip = regs ? 
instruction_pointer(regs) : 0; - bool handled = false; - - if (unlikely(page_addr == NULL)) -@@ -1085,7 +1087,7 @@ static void vmbus_isr(void) - tasklet_schedule(&hv_cpu->msg_dpc); - } - -- add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); -+ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip); - } - - /* -diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h -index 875c41b23f20..ff5eb8d1ede4 100644 ---- a/include/linux/irqdesc.h -+++ b/include/linux/irqdesc.h -@@ -71,6 +71,7 @@ struct irq_desc { - unsigned int irqs_unhandled; - atomic_t threads_handled; - int threads_handled_last; -+ u64 random_ip; - raw_spinlock_t lock; - struct cpumask *percpu_enabled; - const struct cpumask *percpu_affinity; -diff --git a/include/linux/random.h b/include/linux/random.h -index 445a0ea4ff49..a7b7d9f97580 100644 ---- a/include/linux/random.h -+++ b/include/linux/random.h -@@ -32,7 +32,7 @@ static inline void add_latent_entropy(void) {} - - extern void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value) __latent_entropy; --extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy; -+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy; - - extern void get_random_bytes(void *buf, int nbytes); - extern int wait_for_random_bytes(void); -diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c -index 38554bc35375..06a80bbf78af 100644 ---- a/kernel/irq/handle.c -+++ b/kernel/irq/handle.c -@@ -185,10 +185,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) - { - irqreturn_t retval; - unsigned int flags = 0; -+ struct pt_regs *regs = get_irq_regs(); -+ u64 ip = regs ? instruction_pointer(regs) : 0; - - retval = __handle_irq_event_percpu(desc, &flags); - -- add_interrupt_randomness(desc->irq_data.irq, flags); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ desc->random_ip = ip; -+#else -+ add_interrupt_randomness(desc->irq_data.irq, flags, ip); -+#endif - - if (!noirqdebug) - note_interrupt(desc, retval); -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 5701774a6d71..ce86341a9e19 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -1110,6 +1110,12 @@ static int irq_thread(void *data) - if (action_ret == IRQ_WAKE_THREAD) - irq_wake_secondary(desc, action); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ migrate_disable(); -+ add_interrupt_randomness(action->irq, 0, -+ desc->random_ip ^ (unsigned long) action); -+ migrate_enable(); -+#endif - wake_threads_waitq(desc); - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch b/kernel/patches-4.19.x-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch deleted file mode 100644 index 65f3311ee..000000000 --- a/kernel/patches-4.19.x-rt/0229-cpu-hotplug-Implement-CPU-pinning.patch +++ /dev/null @@ -1,116 +0,0 @@ -From cdd389d22cc7f273f3d955eeacc80bda55db5bb8 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 19 Jul 2017 17:31:20 +0200 -Subject: [PATCH 229/328] cpu/hotplug: Implement CPU pinning - -Signed-off-by: Thomas Gleixner ---- - include/linux/sched.h | 1 + - kernel/cpu.c | 38 ++++++++++++++++++++++++++++++++++++++ - 2 files changed, 39 insertions(+) - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index dc668524ccff..fc9300d0787d 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -671,6 +671,7 @@ struct task_struct { - #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - int migrate_disable; - int migrate_disable_update; -+ int pinned_on_cpu; - # ifdef 
CONFIG_SCHED_DEBUG - int migrate_disable_atomic; - # endif -diff --git a/kernel/cpu.c b/kernel/cpu.c -index 328d7bf67d2f..e005e64ae30f 100644 ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -75,6 +75,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { - .fail = CPUHP_INVALID, - }; - -+#ifdef CONFIG_HOTPLUG_CPU -+static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \ -+ __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock); -+#endif -+ - #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) - static struct lockdep_map cpuhp_state_up_map = - STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map); -@@ -286,7 +291,28 @@ static int cpu_hotplug_disabled; - */ - void pin_current_cpu(void) - { -+ struct rt_rw_lock *cpuhp_pin; -+ unsigned int cpu; -+ int ret; -+ -+again: -+ cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock); -+ ret = __read_rt_trylock(cpuhp_pin); -+ if (ret) { -+ current->pinned_on_cpu = smp_processor_id(); -+ return; -+ } -+ cpu = smp_processor_id(); -+ preempt_enable(); -+ -+ __read_rt_lock(cpuhp_pin); - -+ preempt_disable(); -+ if (cpu != smp_processor_id()) { -+ __read_rt_unlock(cpuhp_pin); -+ goto again; -+ } -+ current->pinned_on_cpu = cpu; - } - - /** -@@ -294,6 +320,13 @@ void pin_current_cpu(void) - */ - void unpin_current_cpu(void) - { -+ struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock); -+ -+ if (WARN_ON(current->pinned_on_cpu != smp_processor_id())) -+ cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, current->pinned_on_cpu); -+ -+ current->pinned_on_cpu = -1; -+ __read_rt_unlock(cpuhp_pin); - } - - DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); -@@ -853,6 +886,7 @@ static int take_cpu_down(void *_param) - - static int takedown_cpu(unsigned int cpu) - { -+ struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu); - struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); - int err; - -@@ -865,11 +899,14 @@ static int takedown_cpu(unsigned int cpu) - */ - irq_lock_sparse(); - -+ __write_rt_lock(cpuhp_pin); -+ - /* - * So now all preempt/rcu users must observe !cpu_active(). - */ - err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); - if (err) { -+ __write_rt_unlock(cpuhp_pin); - /* CPU refused to die */ - irq_unlock_sparse(); - /* Unpark the hotplug thread so we can rollback there */ -@@ -888,6 +925,7 @@ static int takedown_cpu(unsigned int cpu) - wait_for_ap_thread(st, false); - BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); - -+ __write_rt_unlock(cpuhp_pin); - /* Interrupts are moved away from the dying cpu, reenable alloc/free */ - irq_unlock_sparse(); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch b/kernel/patches-4.19.x-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch deleted file mode 100644 index a2627e6b0..000000000 --- a/kernel/patches-4.19.x-rt/0230-sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 9c0e36957da69450658952d7d76b7fe79d8e52ac Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Sun, 19 Aug 2018 08:28:35 +0200 -Subject: [PATCH 230/328] sched: Allow pinned user tasks to be awakened to the - CPU they pinned - -Since commit 7af443ee16976 ("sched/core: Require cpu_active() in -select_task_rq(), for user tasks") select_fallback_rq() will BUG() if -the CPU to which a task has pinned itself and pinned becomes -!cpu_active() while it slept. 
-The task will continue running on the to-be-removed CPU and will remove -itself from the CPU during takedown_cpu() (while cpuhp_pin_lock will be -acquired) and move to another CPU based on its mask after the -migrate_disable() section has been left. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/core.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index dcf2deedd3f8..6ef0dcea94d7 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -904,7 +904,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) - if (!cpumask_test_cpu(cpu, p->cpus_ptr)) - return false; - -- if (is_per_cpu_kthread(p)) -+ if (is_per_cpu_kthread(p) || __migrate_disabled(p)) - return cpu_online(cpu); - - return cpu_active(cpu); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/kernel/patches-4.19.x-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch deleted file mode 100644 index 4df93059f..000000000 --- a/kernel/patches-4.19.x-rt/0231-hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch +++ /dev/null @@ -1,101 +0,0 @@ -From 678bc8b1f66cac6073d0272788cb512333ae36a2 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 4 Aug 2017 18:31:00 +0200 -Subject: [PATCH 231/328] hotplug: duct-tape RT-rwlock usage for non-RT - -This type is only available on -RT. We need to craft something for -non-RT. Since the only migrate_disable() user is -RT only, there is no -damage. - -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/cpu.c | 14 +++++++++++++- - 1 file changed, 13 insertions(+), 1 deletion(-) - -diff --git a/kernel/cpu.c b/kernel/cpu.c -index e005e64ae30f..948fbcc4d63c 100644 ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -75,7 +75,7 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { - .fail = CPUHP_INVALID, - }; - --#ifdef CONFIG_HOTPLUG_CPU -+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PREEMPT_RT_FULL) - static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \ - __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock); - #endif -@@ -291,6 +291,7 @@ static int cpu_hotplug_disabled; - */ - void pin_current_cpu(void) - { -+#ifdef CONFIG_PREEMPT_RT_FULL - struct rt_rw_lock *cpuhp_pin; - unsigned int cpu; - int ret; -@@ -313,6 +314,7 @@ void pin_current_cpu(void) - goto again; - } - current->pinned_on_cpu = cpu; -+#endif - } - - /** -@@ -320,6 +322,7 @@ void pin_current_cpu(void) - */ - void unpin_current_cpu(void) - { -+#ifdef CONFIG_PREEMPT_RT_FULL - struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock); - - if (WARN_ON(current->pinned_on_cpu != smp_processor_id())) -@@ -327,6 +330,7 @@ void unpin_current_cpu(void) - - current->pinned_on_cpu = -1; - __read_rt_unlock(cpuhp_pin); -+#endif - } - - DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); -@@ -886,7 +890,9 @@ static int take_cpu_down(void *_param) - - static int takedown_cpu(unsigned int cpu) - { -+#ifdef CONFIG_PREEMPT_RT_FULL - struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu); -+#endif - struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); - int err; - -@@ -899,14 +905,18 @@ static int takedown_cpu(unsigned int cpu) - */ - irq_lock_sparse(); - -+#ifdef CONFIG_PREEMPT_RT_FULL - __write_rt_lock(cpuhp_pin); -+#endif - - /* - * So now all preempt/rcu users must observe !cpu_active(). 
- */ - err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); - if (err) { -+#ifdef CONFIG_PREEMPT_RT_FULL - __write_rt_unlock(cpuhp_pin); -+#endif - /* CPU refused to die */ - irq_unlock_sparse(); - /* Unpark the hotplug thread so we can rollback there */ -@@ -925,7 +935,9 @@ static int takedown_cpu(unsigned int cpu) - wait_for_ap_thread(st, false); - BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); - -+#ifdef CONFIG_PREEMPT_RT_FULL - __write_rt_unlock(cpuhp_pin); -+#endif - /* Interrupts are moved away from the dying cpu, reenable alloc/free */ - irq_unlock_sparse(); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch b/kernel/patches-4.19.x-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch deleted file mode 100644 index 897e2087c..000000000 --- a/kernel/patches-4.19.x-rt/0232-net-Remove-preemption-disabling-in-netif_rx.patch +++ /dev/null @@ -1,70 +0,0 @@ -From e55d9a3613381070dbd9c5e107501fb3c8b87b0b Mon Sep 17 00:00:00 2001 -From: Priyanka Jain -Date: Thu, 17 May 2012 09:35:11 +0530 -Subject: [PATCH 232/328] net: Remove preemption disabling in netif_rx() - -1)enqueue_to_backlog() (called from netif_rx) should be - bind to a particluar CPU. This can be achieved by - disabling migration. No need to disable preemption - -2)Fixes crash "BUG: scheduling while atomic: ksoftirqd" - in case of RT. - If preemption is disabled, enqueue_to_backog() is called - in atomic context. And if backlog exceeds its count, - kfree_skb() is called. But in RT, kfree_skb() might - gets scheduled out, so it expects non atomic context. - -3)When CONFIG_PREEMPT_RT_FULL is not defined, - migrate_enable(), migrate_disable() maps to - preempt_enable() and preempt_disable(), so no - change in functionality in case of non-RT. 
- --Replace preempt_enable(), preempt_disable() with - migrate_enable(), migrate_disable() respectively --Replace get_cpu(), put_cpu() with get_cpu_light(), - put_cpu_light() respectively - -Signed-off-by: Priyanka Jain -Acked-by: Rajan Srivastava -Cc: -Link: http://lkml.kernel.org/r/1337227511-2271-1-git-send-email-Priyanka.Jain@freescale.com - -Signed-off-by: Thomas Gleixner ---- - net/core/dev.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/net/core/dev.c b/net/core/dev.c -index 19e2cd0897b3..935156f1c5d4 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -4487,7 +4487,7 @@ static int netif_rx_internal(struct sk_buff *skb) - struct rps_dev_flow voidflow, *rflow = &voidflow; - int cpu; - -- preempt_disable(); -+ migrate_disable(); - rcu_read_lock(); - - cpu = get_rps_cpu(skb->dev, skb, &rflow); -@@ -4497,14 +4497,14 @@ static int netif_rx_internal(struct sk_buff *skb) - ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); - - rcu_read_unlock(); -- preempt_enable(); -+ migrate_enable(); - } else - #endif - { - unsigned int qtail; - -- ret = enqueue_to_backlog(skb, get_cpu(), &qtail); -- put_cpu(); -+ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); -+ put_cpu_light(); - } - return ret; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch b/kernel/patches-4.19.x-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch deleted file mode 100644 index 73385fbe8..000000000 --- a/kernel/patches-4.19.x-rt/0233-net-Another-local_irq_disable-kmalloc-headache.patch +++ /dev/null @@ -1,64 +0,0 @@ -From 8e642bd4b1957e712c99588e4b172ffb6ea77cad Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 26 Sep 2012 16:21:08 +0200 -Subject: [PATCH 233/328] net: Another local_irq_disable/kmalloc headache - -Replace it by a local lock. 
Though that's pretty inefficient :( - -Signed-off-by: Thomas Gleixner ---- - net/core/skbuff.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - -diff --git a/net/core/skbuff.c b/net/core/skbuff.c -index 0629ca89ab74..6ca7cb2b4364 100644 ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -63,6 +63,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -330,6 +331,7 @@ struct napi_alloc_cache { - - static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); - static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); -+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); - - static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) - { -@@ -337,10 +339,10 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) - unsigned long flags; - void *data; - -- local_irq_save(flags); -+ local_lock_irqsave(netdev_alloc_lock, flags); - nc = this_cpu_ptr(&netdev_alloc_cache); - data = page_frag_alloc(nc, fragsz, gfp_mask); -- local_irq_restore(flags); -+ local_unlock_irqrestore(netdev_alloc_lock, flags); - return data; - } - -@@ -412,13 +414,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, - if (sk_memalloc_socks()) - gfp_mask |= __GFP_MEMALLOC; - -- local_irq_save(flags); -+ local_lock_irqsave(netdev_alloc_lock, flags); - - nc = this_cpu_ptr(&netdev_alloc_cache); - data = page_frag_alloc(nc, len, gfp_mask); - pfmemalloc = nc->pfmemalloc; - -- local_irq_restore(flags); -+ local_unlock_irqrestore(netdev_alloc_lock, flags); - - if (unlikely(!data)) - return NULL; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch b/kernel/patches-4.19.x-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch deleted file mode 100644 index e75e3438e..000000000 --- a/kernel/patches-4.19.x-rt/0234-net-core-protect-users-of-napi_alloc_cache-against-r.patch +++ /dev/null @@ -1,118 +0,0 @@ -From ad95e785998d05c5e60430c272511a05a2cf65ca Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 15 Jan 2016 16:33:34 +0100 -Subject: [PATCH 234/328] net/core: protect users of napi_alloc_cache against - reentrance - -On -RT the code running in BH can not be moved to another CPU so CPU -local variable remain local. However the code can be preempted -and another task may enter BH accessing the same CPU using the same -napi_alloc_cache variable. -This patch ensures that each user of napi_alloc_cache uses a local lock. 
- -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - net/core/skbuff.c | 25 +++++++++++++++++++------ - 1 file changed, 19 insertions(+), 6 deletions(-) - -diff --git a/net/core/skbuff.c b/net/core/skbuff.c -index 6ca7cb2b4364..c5c0d2095873 100644 ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -332,6 +332,7 @@ struct napi_alloc_cache { - static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); - static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); - static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); -+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock); - - static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) - { -@@ -363,9 +364,13 @@ EXPORT_SYMBOL(netdev_alloc_frag); - - static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) - { -- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); -+ struct napi_alloc_cache *nc; -+ void *data; - -- return page_frag_alloc(&nc->page, fragsz, gfp_mask); -+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); -+ data = page_frag_alloc(&nc->page, fragsz, gfp_mask); -+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); -+ return data; - } - - void *napi_alloc_frag(unsigned int fragsz) -@@ -461,9 +466,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb); - struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, - gfp_t gfp_mask) - { -- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); -+ struct napi_alloc_cache *nc; - struct sk_buff *skb; - void *data; -+ bool pfmemalloc; - - len += NET_SKB_PAD + NET_IP_ALIGN; - -@@ -481,7 +487,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, - if (sk_memalloc_socks()) - gfp_mask |= __GFP_MEMALLOC; - -+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); - data = page_frag_alloc(&nc->page, len, gfp_mask); -+ pfmemalloc = nc->page.pfmemalloc; -+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); - if (unlikely(!data)) - return NULL; - -@@ -492,7 +501,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, - } - - /* use OR instead of assignment to avoid clearing of bits in mask */ -- if (nc->page.pfmemalloc) -+ if (pfmemalloc) - skb->pfmemalloc = 1; - skb->head_frag = 1; - -@@ -724,23 +733,26 @@ void __consume_stateless_skb(struct sk_buff *skb) - - void __kfree_skb_flush(void) - { -- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); -+ struct napi_alloc_cache *nc; - -+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); - /* flush skb_cache if containing objects */ - if (nc->skb_count) { - kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count, - nc->skb_cache); - nc->skb_count = 0; - } -+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); - } - - static inline void _kfree_skb_defer(struct sk_buff *skb) - { -- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); -+ struct napi_alloc_cache *nc; - - /* drop skb->head and call any destructors for packet */ - skb_release_all(skb); - -+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); - /* record skb to CPU local list */ - nc->skb_cache[nc->skb_count++] = skb; - -@@ -755,6 +767,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb) - nc->skb_cache); - nc->skb_count = 0; - } -+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); - } - void __kfree_skb_defer(struct sk_buff *skb) - { --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch 
b/kernel/patches-4.19.x-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch deleted file mode 100644 index 21b9ea373..000000000 --- a/kernel/patches-4.19.x-rt/0235-net-netfilter-Serialize-xt_write_recseq-sections-on-.patch +++ /dev/null @@ -1,83 +0,0 @@ -From 36ec80a84528a6adb376007baf97560835f37d79 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Sun, 28 Oct 2012 11:18:08 +0100 -Subject: [PATCH 235/328] net: netfilter: Serialize xt_write_recseq sections on - RT - -The netfilter code relies only on the implicit semantics of -local_bh_disable() for serializing wt_write_recseq sections. RT breaks -that and needs explicit serialization here. - -Reported-by: Peter LaDow -Signed-off-by: Thomas Gleixner ---- - include/linux/netfilter/x_tables.h | 7 +++++++ - net/netfilter/core.c | 6 ++++++ - 2 files changed, 13 insertions(+) - -diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h -index 9077b3ebea08..1710f2aff350 100644 ---- a/include/linux/netfilter/x_tables.h -+++ b/include/linux/netfilter/x_tables.h -@@ -6,6 +6,7 @@ - #include - #include - #include -+#include - #include - - /* Test a struct->invflags and a boolean for inequality */ -@@ -345,6 +346,8 @@ void xt_free_table_info(struct xt_table_info *info); - */ - DECLARE_PER_CPU(seqcount_t, xt_recseq); - -+DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); -+ - /* xt_tee_enabled - true if x_tables needs to handle reentrancy - * - * Enabled if current ip(6)tables ruleset has at least one -j TEE rule. -@@ -365,6 +368,9 @@ static inline unsigned int xt_write_recseq_begin(void) - { - unsigned int addend; - -+ /* RT protection */ -+ local_lock(xt_write_lock); -+ - /* - * Low order bit of sequence is set if we already - * called xt_write_recseq_begin(). -@@ -395,6 +401,7 @@ static inline void xt_write_recseq_end(unsigned int addend) - /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ - smp_wmb(); - __this_cpu_add(xt_recseq.sequence, addend); -+ local_unlock(xt_write_lock); - } - - /* -diff --git a/net/netfilter/core.c b/net/netfilter/core.c -index 93aaec3a54ec..b364cf8e5776 100644 ---- a/net/netfilter/core.c -+++ b/net/netfilter/core.c -@@ -20,6 +20,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -27,6 +28,11 @@ - - #include "nf_internals.h" - -+#ifdef CONFIG_PREEMPT_RT_BASE -+DEFINE_LOCAL_IRQ_LOCK(xt_write_lock); -+EXPORT_PER_CPU_SYMBOL(xt_write_lock); -+#endif -+ - const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly; - EXPORT_SYMBOL_GPL(nf_ipv6_ops); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch b/kernel/patches-4.19.x-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch deleted file mode 100644 index da9132c03..000000000 --- a/kernel/patches-4.19.x-rt/0236-net-Add-a-mutex-around-devnet_rename_seq.patch +++ /dev/null @@ -1,112 +0,0 @@ -From b93f55390ad8ab8d74d16b0d67fcfdbf8be92dae Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 20 Mar 2013 18:06:20 +0100 -Subject: [PATCH 236/328] net: Add a mutex around devnet_rename_seq - -On RT write_seqcount_begin() disables preemption and device_rename() -allocates memory with GFP_KERNEL and grabs later the sysfs_mutex -mutex. Serialize with a mutex and add use the non preemption disabling -__write_seqcount_begin(). - -To avoid writer starvation, let the reader grab the mutex and release -it when it detects a writer in progress. This keeps the normal case -(no reader on the fly) fast. 
- -[ tglx: Instead of replacing the seqcount by a mutex, add the mutex ] - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Thomas Gleixner ---- - net/core/dev.c | 34 ++++++++++++++++++++-------------- - 1 file changed, 20 insertions(+), 14 deletions(-) - -diff --git a/net/core/dev.c b/net/core/dev.c -index 935156f1c5d4..dbd9bdffafff 100644 ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -196,6 +196,7 @@ static unsigned int napi_gen_id = NR_CPUS; - static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); - - static seqcount_t devnet_rename_seq; -+static DEFINE_MUTEX(devnet_rename_mutex); - - static inline void dev_base_seq_inc(struct net *net) - { -@@ -921,7 +922,8 @@ int netdev_get_name(struct net *net, char *name, int ifindex) - strcpy(name, dev->name); - rcu_read_unlock(); - if (read_seqcount_retry(&devnet_rename_seq, seq)) { -- cond_resched(); -+ mutex_lock(&devnet_rename_mutex); -+ mutex_unlock(&devnet_rename_mutex); - goto retry; - } - -@@ -1198,20 +1200,17 @@ int dev_change_name(struct net_device *dev, const char *newname) - likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) - return -EBUSY; - -- write_seqcount_begin(&devnet_rename_seq); -+ mutex_lock(&devnet_rename_mutex); -+ __raw_write_seqcount_begin(&devnet_rename_seq); - -- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { -- write_seqcount_end(&devnet_rename_seq); -- return 0; -- } -+ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) -+ goto outunlock; - - memcpy(oldname, dev->name, IFNAMSIZ); - - err = dev_get_valid_name(net, dev, newname); -- if (err < 0) { -- write_seqcount_end(&devnet_rename_seq); -- return err; -- } -+ if (err < 0) -+ goto outunlock; - - if (oldname[0] && !strchr(oldname, '%')) - netdev_info(dev, "renamed from %s\n", oldname); -@@ -1224,11 +1223,12 @@ int dev_change_name(struct net_device *dev, const char *newname) - if (ret) { - memcpy(dev->name, oldname, IFNAMSIZ); - dev->name_assign_type = old_assign_type; -- write_seqcount_end(&devnet_rename_seq); -- return ret; -+ err = ret; -+ goto outunlock; - } - -- write_seqcount_end(&devnet_rename_seq); -+ __raw_write_seqcount_end(&devnet_rename_seq); -+ mutex_unlock(&devnet_rename_mutex); - - netdev_adjacent_rename_links(dev, oldname); - -@@ -1249,7 +1249,8 @@ int dev_change_name(struct net_device *dev, const char *newname) - /* err >= 0 after dev_alloc_name() or stores the first errno */ - if (err >= 0) { - err = ret; -- write_seqcount_begin(&devnet_rename_seq); -+ mutex_lock(&devnet_rename_mutex); -+ __raw_write_seqcount_begin(&devnet_rename_seq); - memcpy(dev->name, oldname, IFNAMSIZ); - memcpy(oldname, newname, IFNAMSIZ); - dev->name_assign_type = old_assign_type; -@@ -1262,6 +1263,11 @@ int dev_change_name(struct net_device *dev, const char *newname) - } - - return err; -+ -+outunlock: -+ __raw_write_seqcount_end(&devnet_rename_seq); -+ mutex_unlock(&devnet_rename_mutex); -+ return err; - } - - /** --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch b/kernel/patches-4.19.x-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch deleted file mode 100644 index 1884f8346..000000000 --- a/kernel/patches-4.19.x-rt/0237-lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 144e32e34a0d8338cf7a1ae7cf884ed905481732 Mon Sep 17 00:00:00 2001 -From: Yong Zhang -Date: Mon, 16 Apr 2012 15:01:56 +0800 -Subject: [PATCH 237/328] lockdep: selftest: Only do hardirq context test for - raw spinlock - -On -rt there is no softirq context any more and rwlock is 
sleepable, -disable softirq context test and rwlock+irq test. - -Signed-off-by: Yong Zhang -Cc: Yong Zhang -Link: http://lkml.kernel.org/r/1334559716-18447-3-git-send-email-yong.zhang0@gmail.com -Signed-off-by: Thomas Gleixner ---- - lib/locking-selftest.c | 23 +++++++++++++++++++++++ - 1 file changed, 23 insertions(+) - -diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c -index 1e1bbf171eca..5cdf3809905e 100644 ---- a/lib/locking-selftest.c -+++ b/lib/locking-selftest.c -@@ -2057,6 +2057,7 @@ void locking_selftest(void) - - printk(" --------------------------------------------------------------------------\n"); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * irq-context testcases: - */ -@@ -2069,6 +2070,28 @@ void locking_selftest(void) - - DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); - // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); -+#else -+ /* On -rt, we only do hardirq context test for raw spinlock */ -+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); -+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); -+ -+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); -+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); -+ -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); -+ -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); -+#endif - - ww_tests(); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/kernel/patches-4.19.x-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch deleted file mode 100644 index 4fea24aa5..000000000 --- a/kernel/patches-4.19.x-rt/0238-lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch +++ /dev/null @@ -1,148 +0,0 @@ -From f2f9a4e47d0dc6f3817bba4c178048da4763b0c6 Mon Sep 17 00:00:00 2001 -From: Josh Cartwright -Date: Wed, 28 Jan 2015 13:08:45 -0600 -Subject: [PATCH 238/328] lockdep: selftest: fix warnings due to missing - PREEMPT_RT conditionals - -"lockdep: Selftest: Only do hardirq context test for raw spinlock" -disabled the execution of certain tests with PREEMPT_RT_FULL, but did -not prevent the tests from still being defined. 
This leads to warnings -like: - - ./linux/lib/locking-selftest.c:574:1: warning: 'irqsafe1_hard_rlock_12' defined but not used [-Wunused-function] - ./linux/lib/locking-selftest.c:574:1: warning: 'irqsafe1_hard_rlock_21' defined but not used [-Wunused-function] - ./linux/lib/locking-selftest.c:577:1: warning: 'irqsafe1_hard_wlock_12' defined but not used [-Wunused-function] - ./linux/lib/locking-selftest.c:577:1: warning: 'irqsafe1_hard_wlock_21' defined but not used [-Wunused-function] - ./linux/lib/locking-selftest.c:580:1: warning: 'irqsafe1_soft_spin_12' defined but not used [-Wunused-function] - ... - -Fixed by wrapping the test definitions in #ifndef CONFIG_PREEMPT_RT_FULL -conditionals. - - -Signed-off-by: Josh Cartwright -Signed-off-by: Xander Huff -Acked-by: Gratian Crisan -Signed-off-by: Sebastian Andrzej Siewior ---- - lib/locking-selftest.c | 27 +++++++++++++++++++++++++++ - 1 file changed, 27 insertions(+) - -diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c -index 5cdf3809905e..32db9532ddd4 100644 ---- a/lib/locking-selftest.c -+++ b/lib/locking-selftest.c -@@ -742,6 +742,8 @@ GENERATE_TESTCASE(init_held_rtmutex); - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) - -@@ -757,9 +759,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock) - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Enabling hardirqs with a softirq-safe lock held: - */ -@@ -792,6 +797,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) - #undef E1 - #undef E2 - -+#endif -+ - /* - * Enabling irqs with an irq-safe lock held: - */ -@@ -815,6 +822,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) - -@@ -830,6 +839,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock) - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - -@@ -861,6 +872,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) - -@@ -876,6 +889,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock) - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - #undef E3 -@@ -909,6 +924,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) - -@@ -924,10 +941,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock) - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - #undef E3 - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - /* - * read-lock / write-lock irq inversion. 
- * -@@ -990,6 +1011,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock) - #undef E2 - #undef E3 - -+#endif -+ -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - /* - * read-lock / write-lock recursion that is actually safe. - */ -@@ -1028,6 +1053,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) - #undef E2 - #undef E3 - -+#endif -+ - /* - * read-lock / write-lock recursion that is unsafe. - */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0239-sched-Add-support-for-lazy-preemption.patch b/kernel/patches-4.19.x-rt/0239-sched-Add-support-for-lazy-preemption.patch deleted file mode 100644 index fb962c703..000000000 --- a/kernel/patches-4.19.x-rt/0239-sched-Add-support-for-lazy-preemption.patch +++ /dev/null @@ -1,670 +0,0 @@ -From b4aa63613da31452e5bf88d2a20f1349070b9c9e Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 26 Oct 2012 18:50:54 +0100 -Subject: [PATCH 239/328] sched: Add support for lazy preemption - -It has become an obsession to mitigate the determinism vs. throughput -loss of RT. Looking at the mainline semantics of preemption points -gives a hint why RT sucks throughput wise for ordinary SCHED_OTHER -tasks. One major issue is the wakeup of tasks which are right away -preempting the waking task while the waking task holds a lock on which -the woken task will block right after having preempted the wakee. In -mainline this is prevented due to the implicit preemption disable of -spin/rw_lock held regions. On RT this is not possible due to the fully -preemptible nature of sleeping spinlocks. - -Though for a SCHED_OTHER task preempting another SCHED_OTHER task this -is really not a correctness issue. RT folks are concerned about -SCHED_FIFO/RR tasks preemption and not about the purely fairness -driven SCHED_OTHER preemption latencies. - -So I introduced a lazy preemption mechanism which only applies to -SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside of the -existing preempt_count each tasks sports now a preempt_lazy_count -which is manipulated on lock acquiry and release. This is slightly -incorrect as for lazyness reasons I coupled this on -migrate_disable/enable so some other mechanisms get the same treatment -(e.g. get_cpu_light). - -Now on the scheduler side instead of setting NEED_RESCHED this sets -NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and -therefor allows to exit the waking task the lock held region before -the woken task preempts. That also works better for cross CPU wakeups -as the other side can stay in the adaptive spinning loop. - -For RT class preemption there is no change. This simply sets -NEED_RESCHED and forgoes the lazy preemption counter. - - Initial test do not expose any observable latency increasement, but -history shows that I've been proven wrong before :) - -The lazy preemption mode is per default on, but with -CONFIG_SCHED_DEBUG enabled it can be disabled via: - - # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features - -and reenabled via - - # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features - -The test results so far are very machine and workload dependent, but -there is a clear trend that it enhances the non RT workload -performance. 
- -Signed-off-by: Thomas Gleixner ---- - include/linux/preempt.h | 35 ++++++++++++++- - include/linux/sched.h | 38 +++++++++++++++++ - include/linux/thread_info.h | 12 +++++- - include/linux/trace_events.h | 1 + - kernel/Kconfig.preempt | 6 +++ - kernel/cpu.c | 2 + - kernel/sched/core.c | 83 +++++++++++++++++++++++++++++++++++- - kernel/sched/fair.c | 16 +++---- - kernel/sched/features.h | 3 ++ - kernel/sched/sched.h | 9 ++++ - kernel/trace/trace.c | 36 +++++++++------- - kernel/trace/trace.h | 2 + - kernel/trace/trace_output.c | 14 +++++- - 13 files changed, 228 insertions(+), 29 deletions(-) - -diff --git a/include/linux/preempt.h b/include/linux/preempt.h -index ed8413e7140f..9c74a019bf57 100644 ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -180,6 +180,20 @@ extern void preempt_count_sub(int val); - #define preempt_count_inc() preempt_count_add(1) - #define preempt_count_dec() preempt_count_sub(1) - -+#ifdef CONFIG_PREEMPT_LAZY -+#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) -+#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) -+#define inc_preempt_lazy_count() add_preempt_lazy_count(1) -+#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) -+#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) -+#else -+#define add_preempt_lazy_count(val) do { } while (0) -+#define sub_preempt_lazy_count(val) do { } while (0) -+#define inc_preempt_lazy_count() do { } while (0) -+#define dec_preempt_lazy_count() do { } while (0) -+#define preempt_lazy_count() (0) -+#endif -+ - #ifdef CONFIG_PREEMPT_COUNT - - #define preempt_disable() \ -@@ -188,6 +202,12 @@ do { \ - barrier(); \ - } while (0) - -+#define preempt_lazy_disable() \ -+do { \ -+ inc_preempt_lazy_count(); \ -+ barrier(); \ -+} while (0) -+ - #define sched_preempt_enable_no_resched() \ - do { \ - barrier(); \ -@@ -250,6 +270,13 @@ do { \ - __preempt_schedule(); \ - } while (0) - -+#define preempt_lazy_enable() \ -+do { \ -+ dec_preempt_lazy_count(); \ -+ barrier(); \ -+ preempt_check_resched(); \ -+} while (0) -+ - #else /* !CONFIG_PREEMPT */ - #define preempt_enable() \ - do { \ -@@ -257,6 +284,12 @@ do { \ - preempt_count_dec(); \ - } while (0) - -+#define preempt_lazy_enable() \ -+do { \ -+ dec_preempt_lazy_count(); \ -+ barrier(); \ -+} while (0) -+ - #define preempt_enable_notrace() \ - do { \ - barrier(); \ -@@ -323,7 +356,7 @@ do { \ - } while (0) - #define preempt_fold_need_resched() \ - do { \ -- if (tif_need_resched()) \ -+ if (tif_need_resched_now()) \ - set_preempt_need_resched(); \ - } while (0) - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index fc9300d0787d..a8ebd49c4f96 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -1733,6 +1733,44 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) - return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); - } - -+#ifdef CONFIG_PREEMPT_LAZY -+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); -+} -+ -+static inline int need_resched_lazy(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline int need_resched_now(void) -+{ -+ return 
test_thread_flag(TIF_NEED_RESCHED); -+} -+ -+#else -+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } -+static inline int need_resched_lazy(void) { return 0; } -+ -+static inline int need_resched_now(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED); -+} -+ -+#endif -+ -+ - static inline bool __task_is_stopped_or_traced(struct task_struct *task) - { - if (task->state & (__TASK_STOPPED | __TASK_TRACED)) -diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h -index 8d8821b3689a..d3fcab20d2a3 100644 ---- a/include/linux/thread_info.h -+++ b/include/linux/thread_info.h -@@ -97,7 +97,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag) - #define test_thread_flag(flag) \ - test_ti_thread_flag(current_thread_info(), flag) - --#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) -+#ifdef CONFIG_PREEMPT_LAZY -+#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ -+ test_thread_flag(TIF_NEED_RESCHED_LAZY)) -+#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) -+#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)) -+ -+#else -+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) -+#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) -+#define tif_need_resched_lazy() 0 -+#endif - - #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES - static inline int arch_within_stack_frames(const void * const stack, -diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h -index 1cc4d2da954c..72864a11cec0 100644 ---- a/include/linux/trace_events.h -+++ b/include/linux/trace_events.h -@@ -64,6 +64,7 @@ struct trace_entry { - int pid; - unsigned short migrate_disable; - unsigned short padding; -+ unsigned char preempt_lazy_count; - }; - - #define TRACE_EVENT_TYPE_MAX \ -diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt -index 907d72b3ba95..306567f72a3e 100644 ---- a/kernel/Kconfig.preempt -+++ b/kernel/Kconfig.preempt -@@ -6,6 +6,12 @@ config PREEMPT_RT_BASE - bool - select PREEMPT - -+config HAVE_PREEMPT_LAZY -+ bool -+ -+config PREEMPT_LAZY -+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL -+ - choice - prompt "Preemption Model" - default PREEMPT_NONE -diff --git a/kernel/cpu.c b/kernel/cpu.c -index 948fbcc4d63c..1541189f417b 100644 ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -304,11 +304,13 @@ void pin_current_cpu(void) - return; - } - cpu = smp_processor_id(); -+ preempt_lazy_enable(); - preempt_enable(); - - __read_rt_lock(cpuhp_pin); - - preempt_disable(); -+ preempt_lazy_disable(); - if (cpu != smp_processor_id()) { - __read_rt_unlock(cpuhp_pin); - goto again; -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 6ef0dcea94d7..a17c765d3fcb 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -493,6 +493,48 @@ void resched_curr(struct rq *rq) - trace_sched_wake_idle_without_ipi(cpu); - } - -+#ifdef CONFIG_PREEMPT_LAZY -+ -+static int tsk_is_polling(struct task_struct *p) -+{ -+#ifdef TIF_POLLING_NRFLAG -+ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); -+#else -+ return 0; -+#endif -+} -+ -+void resched_curr_lazy(struct rq *rq) -+{ -+ struct task_struct *curr = rq->curr; -+ int cpu; -+ -+ if (!sched_feat(PREEMPT_LAZY)) { -+ resched_curr(rq); -+ return; -+ } -+ -+ lockdep_assert_held(&rq->lock); -+ -+ if (test_tsk_need_resched(curr)) -+ return; -+ -+ if (test_tsk_need_resched_lazy(curr)) -+ return; -+ -+ set_tsk_need_resched_lazy(curr); -+ -+ cpu = cpu_of(rq); -+ if (cpu == smp_processor_id()) -+ return; -+ -+ 
/* NEED_RESCHED_LAZY must be visible before we test polling */ -+ smp_mb(); -+ if (!tsk_is_polling(curr)) -+ smp_send_reschedule(cpu); -+} -+#endif -+ - void resched_cpu(int cpu) - { - struct rq *rq = cpu_rq(cpu); -@@ -2405,6 +2447,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) - p->on_cpu = 0; - #endif - init_task_preempt_count(p); -+#ifdef CONFIG_HAVE_PREEMPT_LAZY -+ task_thread_info(p)->preempt_lazy_count = 0; -+#endif - #ifdef CONFIG_SMP - plist_node_init(&p->pushable_tasks, MAX_PRIO); - RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -3513,6 +3558,7 @@ static void __sched notrace __schedule(bool preempt) - - next = pick_next_task(rq, prev, &rf); - clear_tsk_need_resched(prev); -+ clear_tsk_need_resched_lazy(prev); - clear_preempt_need_resched(); - - if (likely(prev != next)) { -@@ -3693,6 +3739,30 @@ static void __sched notrace preempt_schedule_common(void) - } while (need_resched()); - } - -+#ifdef CONFIG_PREEMPT_LAZY -+/* -+ * If TIF_NEED_RESCHED is then we allow to be scheduled away since this is -+ * set by a RT task. Oterwise we try to avoid beeing scheduled out as long as -+ * preempt_lazy_count counter >0. -+ */ -+static __always_inline int preemptible_lazy(void) -+{ -+ if (test_thread_flag(TIF_NEED_RESCHED)) -+ return 1; -+ if (current_thread_info()->preempt_lazy_count) -+ return 0; -+ return 1; -+} -+ -+#else -+ -+static inline int preemptible_lazy(void) -+{ -+ return 1; -+} -+ -+#endif -+ - #ifdef CONFIG_PREEMPT - /* - * this is the entry point to schedule() from in-kernel preemption -@@ -3707,7 +3777,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) - */ - if (likely(!preemptible())) - return; -- -+ if (!preemptible_lazy()) -+ return; - preempt_schedule_common(); - } - NOKPROBE_SYMBOL(preempt_schedule); -@@ -3734,6 +3805,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) - if (likely(!preemptible())) - return; - -+ if (!preemptible_lazy()) -+ return; -+ - do { - /* - * Because the function tracer can trace preempt_count_sub() -@@ -5503,7 +5577,9 @@ void init_idle(struct task_struct *idle, int cpu) - - /* Set the preempt count _outside_ the spinlocks! */ - init_idle_preempt_count(idle, cpu); -- -+#ifdef CONFIG_HAVE_PREEMPT_LAZY -+ task_thread_info(idle)->preempt_lazy_count = 0; -+#endif - /* - * The idle tasks have their own, simple scheduling class: - */ -@@ -7228,6 +7304,7 @@ void migrate_disable(void) - } - - preempt_disable(); -+ preempt_lazy_disable(); - pin_current_cpu(); - - migrate_disable_update_cpus_allowed(p); -@@ -7295,6 +7372,7 @@ void migrate_enable(void) - arg.dest_cpu = dest_cpu; - - unpin_current_cpu(); -+ preempt_lazy_enable(); - preempt_enable(); - stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); - tlb_migrate_finish(p->mm); -@@ -7303,6 +7381,7 @@ void migrate_enable(void) - } - } - unpin_current_cpu(); -+ preempt_lazy_enable(); - preempt_enable(); - } - EXPORT_SYMBOL(migrate_enable); -diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 27f9f9a785c1..77d991e9ba23 100644 ---- a/kernel/sched/fair.c -+++ b/kernel/sched/fair.c -@@ -4091,7 +4091,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) - ideal_runtime = sched_slice(cfs_rq, curr); - delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; - if (delta_exec > ideal_runtime) { -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - /* - * The current task ran long enough, ensure it doesn't get - * re-elected due to buddy favours. 
-@@ -4115,7 +4115,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) - return; - - if (delta > ideal_runtime) -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - } - - static void -@@ -4257,7 +4257,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) - * validating it and just reschedule. - */ - if (queued) { -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - return; - } - /* -@@ -4391,7 +4391,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) - * hierarchy can be throttled - */ - if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - } - - static __always_inline -@@ -5091,7 +5091,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) - - if (delta < 0) { - if (rq->curr == p) -- resched_curr(rq); -+ resched_curr_lazy(rq); - return; - } - hrtick_start(rq, delta); -@@ -6684,7 +6684,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ - return; - - preempt: -- resched_curr(rq); -+ resched_curr_lazy(rq); - /* - * Only set the backward buddy when the current task is still - * on the rq. This can happen when a wakeup gets interleaved -@@ -9785,7 +9785,7 @@ static void task_fork_fair(struct task_struct *p) - * 'current' within the tree based on its new key value. - */ - swap(curr->vruntime, se->vruntime); -- resched_curr(rq); -+ resched_curr_lazy(rq); - } - - se->vruntime -= cfs_rq->min_vruntime; -@@ -9809,7 +9809,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) - */ - if (rq->curr == p) { - if (p->prio > oldprio) -- resched_curr(rq); -+ resched_curr_lazy(rq); - } else - check_preempt_curr(rq, p, 0); - } -diff --git a/kernel/sched/features.h b/kernel/sched/features.h -index 68de18405857..12a12be6770b 100644 ---- a/kernel/sched/features.h -+++ b/kernel/sched/features.h -@@ -48,6 +48,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true) - - #ifdef CONFIG_PREEMPT_RT_FULL - SCHED_FEAT(TTWU_QUEUE, false) -+# ifdef CONFIG_PREEMPT_LAZY -+SCHED_FEAT(PREEMPT_LAZY, true) -+# endif - #else - - /* -diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index c79e32488940..c90574112bca 100644 ---- a/kernel/sched/sched.h -+++ b/kernel/sched/sched.h -@@ -1634,6 +1634,15 @@ extern void reweight_task(struct task_struct *p, int prio); - extern void resched_curr(struct rq *rq); - extern void resched_cpu(int cpu); - -+#ifdef CONFIG_PREEMPT_LAZY -+extern void resched_curr_lazy(struct rq *rq); -+#else -+static inline void resched_curr_lazy(struct rq *rq) -+{ -+ resched_curr(rq); -+} -+#endif -+ - extern struct rt_bandwidth def_rt_bandwidth; - extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); - -diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index 5eccf1c201db..d13b58073bce 100644 ---- a/kernel/trace/trace.c -+++ b/kernel/trace/trace.c -@@ -2137,6 +2137,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, - struct task_struct *tsk = current; - - entry->preempt_count = pc & 0xff; -+ entry->preempt_lazy_count = preempt_lazy_count(); - entry->pid = (tsk) ? tsk->pid : 0; - entry->flags = - #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT -@@ -2147,7 +2148,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, - ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | - ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | - ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | -- (tif_need_resched() ? 
TRACE_FLAG_NEED_RESCHED : 0) | -+ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | -+ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) | - (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); - - entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0; -@@ -3349,15 +3351,17 @@ get_total_entries(struct trace_buffer *buf, - - static void print_lat_help_header(struct seq_file *m) - { -- seq_puts(m, "# _------=> CPU# \n" -- "# / _-----=> irqs-off \n" -- "# | / _----=> need-resched \n" -- "# || / _---=> hardirq/softirq \n" -- "# ||| / _--=> preempt-depth \n" -- "# |||| / _--=> migrate-disable\n" -- "# ||||| / delay \n" -- "# cmd pid |||||| time | caller \n" -- "# \\ / ||||| \\ | / \n"); -+ seq_puts(m, "# _--------=> CPU# \n" -+ "# / _-------=> irqs-off \n" -+ "# | / _------=> need-resched \n" -+ "# || / _-----=> need-resched_lazy \n" -+ "# ||| / _----=> hardirq/softirq \n" -+ "# |||| / _---=> preempt-depth \n" -+ "# ||||| / _--=> preempt-lazy-depth\n" -+ "# |||||| / _-=> migrate-disable \n" -+ "# ||||||| / delay \n" -+ "# cmd pid |||||||| time | caller \n" -+ "# \\ / |||||||| \\ | / \n"); - } - - static void print_event_info(struct trace_buffer *buf, struct seq_file *m) -@@ -3395,15 +3399,17 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file - tgid ? tgid_space : space); - seq_printf(m, "# %s / _----=> need-resched\n", - tgid ? tgid_space : space); -- seq_printf(m, "# %s| / _---=> hardirq/softirq\n", -+ seq_printf(m, "# %s| / _---=> need-resched_lazy\n", - tgid ? tgid_space : space); -- seq_printf(m, "# %s|| / _--=> preempt-depth\n", -+ seq_printf(m, "# %s|| / _--=> hardirq/softirq\n", - tgid ? tgid_space : space); -- seq_printf(m, "# %s||| / delay\n", -+ seq_printf(m, "# %s||| / preempt-depth\n", - tgid ? tgid_space : space); -- seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", -+ seq_printf(m, "# %s|||| / delay\n", -+ tgid ? tgid_space : space); -+ seq_printf(m, "# TASK-PID %sCPU# ||||| TIMESTAMP FUNCTION\n", - tgid ? " TGID " : space); -- seq_printf(m, "# | | %s | |||| | |\n", -+ seq_printf(m, "# | | %s | ||||| | |\n", - tgid ? " | " : space); - } - -diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h -index ee0c6a313ed1..5986f494e2b0 100644 ---- a/kernel/trace/trace.h -+++ b/kernel/trace/trace.h -@@ -127,6 +127,7 @@ struct kretprobe_trace_entry_head { - * NEED_RESCHED - reschedule is requested - * HARDIRQ - inside an interrupt handler - * SOFTIRQ - inside a softirq handler -+ * NEED_RESCHED_LAZY - lazy reschedule is requested - */ - enum trace_flag_type { - TRACE_FLAG_IRQS_OFF = 0x01, -@@ -136,6 +137,7 @@ enum trace_flag_type { - TRACE_FLAG_SOFTIRQ = 0x10, - TRACE_FLAG_PREEMPT_RESCHED = 0x20, - TRACE_FLAG_NMI = 0x40, -+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80, - }; - - #define TRACE_BUF_SIZE 1024 -diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c -index 46c96744f09d..3f78b0afb729 100644 ---- a/kernel/trace/trace_output.c -+++ b/kernel/trace/trace_output.c -@@ -448,6 +448,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) - { - char hardsoft_irq; - char need_resched; -+ char need_resched_lazy; - char irqs_off; - int hardirq; - int softirq; -@@ -478,6 +479,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) - break; - } - -+ need_resched_lazy = -+ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; -+ - hardsoft_irq = - (nmi && hardirq) ? 'Z' : - nmi ? 
'z' : -@@ -486,14 +490,20 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) - softirq ? 's' : - '.' ; - -- trace_seq_printf(s, "%c%c%c", -- irqs_off, need_resched, hardsoft_irq); -+ trace_seq_printf(s, "%c%c%c%c", -+ irqs_off, need_resched, need_resched_lazy, -+ hardsoft_irq); - - if (entry->preempt_count) - trace_seq_printf(s, "%x", entry->preempt_count); - else - trace_seq_putc(s, '.'); - -+ if (entry->preempt_lazy_count) -+ trace_seq_printf(s, "%x", entry->preempt_lazy_count); -+ else -+ trace_seq_putc(s, '.'); -+ - if (entry->migrate_disable) - trace_seq_printf(s, "%x", entry->migrate_disable); - else --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0240-ftrace-Fix-trace-header-alignment.patch b/kernel/patches-4.19.x-rt/0240-ftrace-Fix-trace-header-alignment.patch deleted file mode 100644 index d8d63398e..000000000 --- a/kernel/patches-4.19.x-rt/0240-ftrace-Fix-trace-header-alignment.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 9f97e04e5ea92015801bab08af99228bbe2f4085 Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Sun, 16 Oct 2016 05:08:30 +0200 -Subject: [PATCH 240/328] ftrace: Fix trace header alignment - -Line up helper arrows to the right column. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Mike Galbraith -[bigeasy: fixup function tracer header] -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/trace/trace.c | 22 +++++++++++----------- - 1 file changed, 11 insertions(+), 11 deletions(-) - -diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c -index d13b58073bce..99dde9db6e3e 100644 ---- a/kernel/trace/trace.c -+++ b/kernel/trace/trace.c -@@ -3351,17 +3351,17 @@ get_total_entries(struct trace_buffer *buf, - - static void print_lat_help_header(struct seq_file *m) - { -- seq_puts(m, "# _--------=> CPU# \n" -- "# / _-------=> irqs-off \n" -- "# | / _------=> need-resched \n" -- "# || / _-----=> need-resched_lazy \n" -- "# ||| / _----=> hardirq/softirq \n" -- "# |||| / _---=> preempt-depth \n" -- "# ||||| / _--=> preempt-lazy-depth\n" -- "# |||||| / _-=> migrate-disable \n" -- "# ||||||| / delay \n" -- "# cmd pid |||||||| time | caller \n" -- "# \\ / |||||||| \\ | / \n"); -+ seq_puts(m, "# _--------=> CPU# \n" -+ "# / _-------=> irqs-off \n" -+ "# | / _------=> need-resched \n" -+ "# || / _-----=> need-resched_lazy \n" -+ "# ||| / _----=> hardirq/softirq \n" -+ "# |||| / _---=> preempt-depth \n" -+ "# ||||| / _--=> preempt-lazy-depth\n" -+ "# |||||| / _-=> migrate-disable \n" -+ "# ||||||| / delay \n" -+ "# cmd pid |||||||| time | caller \n" -+ "# \\ / |||||||| \\ | / \n"); - } - - static void print_event_info(struct trace_buffer *buf, struct seq_file *m) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0241-x86-Support-for-lazy-preemption.patch b/kernel/patches-4.19.x-rt/0241-x86-Support-for-lazy-preemption.patch deleted file mode 100644 index 2ba0e721c..000000000 --- a/kernel/patches-4.19.x-rt/0241-x86-Support-for-lazy-preemption.patch +++ /dev/null @@ -1,239 +0,0 @@ -From 963f0a5965bbcac189d89f4501cdd7a26bd823df Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 1 Nov 2012 11:03:47 +0100 -Subject: [PATCH 241/328] x86: Support for lazy preemption - -Implement the x86 pieces for lazy preempt. 
- -Signed-off-by: Thomas Gleixner ---- - arch/x86/Kconfig | 1 + - arch/x86/entry/common.c | 4 ++-- - arch/x86/entry/entry_32.S | 17 ++++++++++++++++ - arch/x86/entry/entry_64.S | 16 +++++++++++++++ - arch/x86/include/asm/preempt.h | 31 +++++++++++++++++++++++++++++- - arch/x86/include/asm/thread_info.h | 11 +++++++++++ - arch/x86/kernel/asm-offsets.c | 2 ++ - 7 files changed, 79 insertions(+), 3 deletions(-) - -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index f22e787329cf..a56f57f95993 100644 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -180,6 +180,7 @@ config X86 - select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI - select HAVE_PERF_REGS - select HAVE_PERF_USER_STACK_DUMP -+ select HAVE_PREEMPT_LAZY - select HAVE_RCU_TABLE_FREE if PARAVIRT - select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE - select HAVE_REGS_AND_STACK_ACCESS_API -diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c -index 91676b0d2d4c..3b5e41d9b29d 100644 ---- a/arch/x86/entry/common.c -+++ b/arch/x86/entry/common.c -@@ -134,7 +134,7 @@ static long syscall_trace_enter(struct pt_regs *regs) - - #define EXIT_TO_USERMODE_LOOP_FLAGS \ - (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ -- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING) -+ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING) - - static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) - { -@@ -149,7 +149,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) - /* We have work to do. */ - local_irq_enable(); - -- if (cached_flags & _TIF_NEED_RESCHED) -+ if (cached_flags & _TIF_NEED_RESCHED_MASK) - schedule(); - - #ifdef ARCH_RT_DELAYS_SIGNAL_SEND -diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S -index d07432062ee6..d44f446a0e74 100644 ---- a/arch/x86/entry/entry_32.S -+++ b/arch/x86/entry/entry_32.S -@@ -750,8 +750,25 @@ END(ret_from_exception) - ENTRY(resume_kernel) - DISABLE_INTERRUPTS(CLBR_ANY) - .Lneed_resched: -+ # preempt count == 0 + NEED_RS set? - cmpl $0, PER_CPU_VAR(__preempt_count) -+#ifndef CONFIG_PREEMPT_LAZY - jnz restore_all_kernel -+#else -+ jz test_int_off -+ -+ # atleast preempt count == 0 ? -+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) -+ jne restore_all -+ -+ movl PER_CPU_VAR(current_task), %ebp -+ cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? -+ jnz restore_all -+ -+ testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp) -+ jz restore_all -+ test_int_off: -+#endif - testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? - jz restore_all_kernel - call preempt_schedule_irq -diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S -index 7ffd83c57ef2..663a99f6320f 100644 ---- a/arch/x86/entry/entry_64.S -+++ b/arch/x86/entry/entry_64.S -@@ -732,7 +732,23 @@ retint_kernel: - btl $9, EFLAGS(%rsp) /* were interrupts off? */ - jnc 1f - 0: cmpl $0, PER_CPU_VAR(__preempt_count) -+#ifndef CONFIG_PREEMPT_LAZY - jnz 1f -+#else -+ jz do_preempt_schedule_irq -+ -+ # atleast preempt count == 0 ? 
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) -+ jnz 1f -+ -+ movq PER_CPU_VAR(current_task), %rcx -+ cmpl $0, TASK_TI_preempt_lazy_count(%rcx) -+ jnz 1f -+ -+ btl $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx) -+ jnc 1f -+do_preempt_schedule_irq: -+#endif - call preempt_schedule_irq - jmp 0b - 1: -diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h -index 7f2dbd91fc74..22992c837795 100644 ---- a/arch/x86/include/asm/preempt.h -+++ b/arch/x86/include/asm/preempt.h -@@ -86,17 +86,46 @@ static __always_inline void __preempt_count_sub(int val) - * a decrement which hits zero means we have no preempt_count and should - * reschedule. - */ --static __always_inline bool __preempt_count_dec_and_test(void) -+static __always_inline bool ____preempt_count_dec_and_test(void) - { - GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e); - } - -+static __always_inline bool __preempt_count_dec_and_test(void) -+{ -+ if (____preempt_count_dec_and_test()) -+ return true; -+#ifdef CONFIG_PREEMPT_LAZY -+ if (current_thread_info()->preempt_lazy_count) -+ return false; -+ return test_thread_flag(TIF_NEED_RESCHED_LAZY); -+#else -+ return false; -+#endif -+} -+ - /* - * Returns true when we need to resched and can (barring IRQ state). - */ - static __always_inline bool should_resched(int preempt_offset) - { -+#ifdef CONFIG_PREEMPT_LAZY -+ u32 tmp; -+ -+ tmp = raw_cpu_read_4(__preempt_count); -+ if (tmp == preempt_offset) -+ return true; -+ -+ /* preempt count == 0 ? */ -+ tmp &= ~PREEMPT_NEED_RESCHED; -+ if (tmp) -+ return false; -+ if (current_thread_info()->preempt_lazy_count) -+ return false; -+ return test_thread_flag(TIF_NEED_RESCHED_LAZY); -+#else - return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); -+#endif - } - - #ifdef CONFIG_PREEMPT -diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h -index 82b73b75d67c..dc267291f131 100644 ---- a/arch/x86/include/asm/thread_info.h -+++ b/arch/x86/include/asm/thread_info.h -@@ -56,17 +56,24 @@ struct task_struct; - struct thread_info { - unsigned long flags; /* low level flags */ - u32 status; /* thread synchronous flags */ -+ int preempt_lazy_count; /* 0 => lazy preemptable -+ <0 => BUG */ - }; - - #define INIT_THREAD_INFO(tsk) \ - { \ - .flags = 0, \ -+ .preempt_lazy_count = 0, \ - } - - #else /* !__ASSEMBLY__ */ - - #include - -+#define GET_THREAD_INFO(reg) \ -+ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \ -+ _ASM_SUB $(THREAD_SIZE),reg ; -+ - #endif - - /* -@@ -91,6 +98,7 @@ struct thread_info { - #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */ - #define TIF_NOTSC 16 /* TSC is not accessible in userland */ - #define TIF_IA32 17 /* IA32 compatibility process */ -+#define TIF_NEED_RESCHED_LAZY 18 /* lazy rescheduling necessary */ - #define TIF_NOHZ 19 /* in adaptive nohz mode */ - #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ - #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ -@@ -120,6 +128,7 @@ struct thread_info { - #define _TIF_NOCPUID (1 << TIF_NOCPUID) - #define _TIF_NOTSC (1 << TIF_NOTSC) - #define _TIF_IA32 (1 << TIF_IA32) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) - #define _TIF_NOHZ (1 << TIF_NOHZ) - #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) - #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) -@@ -165,6 +174,8 @@ struct thread_info { - #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) - #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) - -+#define 
_TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) -+ - #define STACK_WARN (THREAD_SIZE/8) - - /* -diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c -index 01de31db300d..ce1c5b9fbd8c 100644 ---- a/arch/x86/kernel/asm-offsets.c -+++ b/arch/x86/kernel/asm-offsets.c -@@ -38,6 +38,7 @@ void common(void) { - - BLANK(); - OFFSET(TASK_TI_flags, task_struct, thread_info.flags); -+ OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count); - OFFSET(TASK_addr_limit, task_struct, thread.addr_limit); - - BLANK(); -@@ -94,6 +95,7 @@ void common(void) { - - BLANK(); - DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); -+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); - - /* TLB state for the entry code */ - OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch b/kernel/patches-4.19.x-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch deleted file mode 100644 index 30d56c7eb..000000000 --- a/kernel/patches-4.19.x-rt/0242-x86-lazy-preempt-properly-check-against-preempt-mask.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 9e04eb54c312d0436d51e83306de05517b13d458 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 18 Feb 2019 16:57:09 +0100 -Subject: [PATCH 242/328] x86: lazy-preempt: properly check against - preempt-mask - -should_resched() should check against preempt_offset after unmasking the -need-resched-bit. Otherwise should_resched() won't work for -preempt_offset != 0 and lazy-preempt set. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/include/asm/preempt.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h -index 22992c837795..f66708779274 100644 ---- a/arch/x86/include/asm/preempt.h -+++ b/arch/x86/include/asm/preempt.h -@@ -118,7 +118,7 @@ static __always_inline bool should_resched(int preempt_offset) - - /* preempt count == 0 ? */ - tmp &= ~PREEMPT_NEED_RESCHED; -- if (tmp) -+ if (tmp != preempt_offset) - return false; - if (current_thread_info()->preempt_lazy_count) - return false; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch b/kernel/patches-4.19.x-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch deleted file mode 100644 index 62239f187..000000000 --- a/kernel/patches-4.19.x-rt/0243-x86-lazy-preempt-use-proper-return-label-on-32bit-x8.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 95f51ccaa646d290d44e1c6a6fece8cebcce7f82 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 26 Feb 2019 14:53:49 +0100 -Subject: [PATCH 243/328] x86: lazy-preempt: use proper return label on - 32bit-x86 - -The lazy-preempt uses the wrong return label in case preemption isn't -possible. This results crash while returning to the kernel. - -Use the correct return label if preemption isn' possible. - -Reported-by: Andri Yngvason -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/x86/entry/entry_32.S | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S -index d44f446a0e74..56398b07211b 100644 ---- a/arch/x86/entry/entry_32.S -+++ b/arch/x86/entry/entry_32.S -@@ -759,15 +759,15 @@ ENTRY(resume_kernel) - - # atleast preempt count == 0 ? 
- cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) -- jne restore_all -+ jne restore_all_kernel - - movl PER_CPU_VAR(current_task), %ebp - cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? -- jnz restore_all -+ jnz restore_all_kernel - - testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp) -- jz restore_all -- test_int_off: -+ jz restore_all_kernel -+test_int_off: - #endif - testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? - jz restore_all_kernel --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0244-arm-Add-support-for-lazy-preemption.patch b/kernel/patches-4.19.x-rt/0244-arm-Add-support-for-lazy-preemption.patch deleted file mode 100644 index 06cea2ecf..000000000 --- a/kernel/patches-4.19.x-rt/0244-arm-Add-support-for-lazy-preemption.patch +++ /dev/null @@ -1,167 +0,0 @@ -From 36713bb8882eef3bd06bc3de88d56fcaa0d968c8 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 31 Oct 2012 12:04:11 +0100 -Subject: [PATCH 244/328] arm: Add support for lazy preemption - -Implement the arm pieces for lazy preempt. - -Signed-off-by: Thomas Gleixner ---- - arch/arm/Kconfig | 1 + - arch/arm/include/asm/thread_info.h | 8 ++++++-- - arch/arm/kernel/asm-offsets.c | 1 + - arch/arm/kernel/entry-armv.S | 19 ++++++++++++++++--- - arch/arm/kernel/entry-common.S | 9 +++++++-- - arch/arm/kernel/signal.c | 3 ++- - 6 files changed, 33 insertions(+), 8 deletions(-) - -diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index bd9d180db5c7..9945e699e0a4 100644 ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -90,6 +90,7 @@ config ARM - select HAVE_PERF_EVENTS - select HAVE_PERF_REGS - select HAVE_PERF_USER_STACK_DUMP -+ select HAVE_PREEMPT_LAZY - select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) - select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_RSEQ -diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h -index 8f55dc520a3e..4f834bfca470 100644 ---- a/arch/arm/include/asm/thread_info.h -+++ b/arch/arm/include/asm/thread_info.h -@@ -49,6 +49,7 @@ struct cpu_context_save { - struct thread_info { - unsigned long flags; /* low level flags */ - int preempt_count; /* 0 => preemptable, <0 => bug */ -+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ - mm_segment_t addr_limit; /* address limit */ - struct task_struct *task; /* main task structure */ - __u32 cpu; /* cpu */ -@@ -139,7 +140,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, - #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ - #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ - #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ --#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ -+#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ -+#define TIF_NEED_RESCHED_LAZY 7 - - #define TIF_NOHZ 12 /* in adaptive nohz mode */ - #define TIF_USING_IWMMXT 17 -@@ -149,6 +151,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, - #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) - #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) - #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) - #define _TIF_UPROBE (1 << TIF_UPROBE) - #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) -@@ -164,7 +167,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, - * Change these and you break ASM code in entry-common.S - */ - #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ -- _TIF_NOTIFY_RESUME 
| _TIF_UPROBE) -+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ -+ _TIF_NEED_RESCHED_LAZY) - - #endif /* __KERNEL__ */ - #endif /* __ASM_ARM_THREAD_INFO_H */ -diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c -index 3968d6c22455..b35d373fc982 100644 ---- a/arch/arm/kernel/asm-offsets.c -+++ b/arch/arm/kernel/asm-offsets.c -@@ -56,6 +56,7 @@ int main(void) - BLANK(); - DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); -+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); - DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); -diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S -index e85a3af9ddeb..cc67c0a3ae7b 100644 ---- a/arch/arm/kernel/entry-armv.S -+++ b/arch/arm/kernel/entry-armv.S -@@ -216,11 +216,18 @@ __irq_svc: - - #ifdef CONFIG_PREEMPT - ldr r8, [tsk, #TI_PREEMPT] @ get preempt count -- ldr r0, [tsk, #TI_FLAGS] @ get flags - teq r8, #0 @ if preempt count != 0 -+ bne 1f @ return from exeption -+ ldr r0, [tsk, #TI_FLAGS] @ get flags -+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set -+ blne svc_preempt @ preempt! -+ -+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count -+ teq r8, #0 @ if preempt lazy count != 0 - movne r0, #0 @ force flags to 0 -- tst r0, #_TIF_NEED_RESCHED -+ tst r0, #_TIF_NEED_RESCHED_LAZY - blne svc_preempt -+1: - #endif - - svc_exit r5, irq = 1 @ return from exception -@@ -235,8 +242,14 @@ svc_preempt: - 1: bl preempt_schedule_irq @ irq en/disable is done inside - ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS - tst r0, #_TIF_NEED_RESCHED -+ bne 1b -+ tst r0, #_TIF_NEED_RESCHED_LAZY - reteq r8 @ go again -- b 1b -+ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count -+ teq r0, #0 @ if preempt lazy count != 0 -+ beq 1b -+ ret r8 @ go again -+ - #endif - - __und_fault: -diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S -index 0465d65d23de..47675830ed30 100644 ---- a/arch/arm/kernel/entry-common.S -+++ b/arch/arm/kernel/entry-common.S -@@ -56,7 +56,9 @@ __ret_fast_syscall: - cmp r2, #TASK_SIZE - blne addr_limit_check_failed - ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing -- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK -+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) -+ bne fast_work_pending -+ tst r1, #_TIF_SECCOMP - bne fast_work_pending - - -@@ -93,8 +95,11 @@ __ret_fast_syscall: - cmp r2, #TASK_SIZE - blne addr_limit_check_failed - ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing -- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK -+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) -+ bne do_slower_path -+ tst r1, #_TIF_SECCOMP - beq no_work_pending -+do_slower_path: - UNWIND(.fnend ) - ENDPROC(ret_fast_syscall) - -diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c -index b908382b69ff..339fbc281cf1 100644 ---- a/arch/arm/kernel/signal.c -+++ b/arch/arm/kernel/signal.c -@@ -652,7 +652,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) - */ - trace_hardirqs_off(); - do { -- if (likely(thread_flags & _TIF_NEED_RESCHED)) { -+ if (likely(thread_flags & (_TIF_NEED_RESCHED | -+ _TIF_NEED_RESCHED_LAZY))) { - schedule(); - } else { - if (unlikely(!user_mode(regs))) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0245-powerpc-Add-support-for-lazy-preemption.patch 
b/kernel/patches-4.19.x-rt/0245-powerpc-Add-support-for-lazy-preemption.patch deleted file mode 100644 index adfa8e185..000000000 --- a/kernel/patches-4.19.x-rt/0245-powerpc-Add-support-for-lazy-preemption.patch +++ /dev/null @@ -1,195 +0,0 @@ -From e02242904650a778f562757a77e406336000d790 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Thu, 1 Nov 2012 10:14:11 +0100 -Subject: [PATCH 245/328] powerpc: Add support for lazy preemption - -Implement the powerpc pieces for lazy preempt. - -Signed-off-by: Thomas Gleixner ---- - arch/powerpc/Kconfig | 1 + - arch/powerpc/include/asm/thread_info.h | 9 +++++++-- - arch/powerpc/kernel/asm-offsets.c | 1 + - arch/powerpc/kernel/entry_32.S | 17 ++++++++++++----- - arch/powerpc/kernel/entry_64.S | 16 ++++++++++++---- - 5 files changed, 33 insertions(+), 11 deletions(-) - -diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig -index 1b332f69dd36..c8c2230c8aff 100644 ---- a/arch/powerpc/Kconfig -+++ b/arch/powerpc/Kconfig -@@ -216,6 +216,7 @@ config PPC - select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH - select HAVE_PERF_REGS - select HAVE_PERF_USER_STACK_DUMP -+ select HAVE_PREEMPT_LAZY - select HAVE_RCU_TABLE_FREE if SMP - select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN -diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h -index 3c0002044bc9..ce316076bc52 100644 ---- a/arch/powerpc/include/asm/thread_info.h -+++ b/arch/powerpc/include/asm/thread_info.h -@@ -37,6 +37,8 @@ struct thread_info { - int cpu; /* cpu we're on */ - int preempt_count; /* 0 => preemptable, - <0 => BUG */ -+ int preempt_lazy_count; /* 0 => preemptable, -+ <0 => BUG */ - unsigned long local_flags; /* private flags for thread */ - #ifdef CONFIG_LIVEPATCH - unsigned long *livepatch_sp; -@@ -81,7 +83,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src - #define TIF_SIGPENDING 1 /* signal pending */ - #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ - #define TIF_FSCHECK 3 /* Check FS is USER_DS on return */ --#define TIF_32BIT 4 /* 32 bit binary */ -+#define TIF_NEED_RESCHED_LAZY 4 /* lazy rescheduling necessary */ - #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ - #define TIF_PATCH_PENDING 6 /* pending live patching update */ - #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ -@@ -100,6 +102,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src - #define TIF_ELF2ABI 18 /* function descriptors must die! */ - #endif - #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */ -+#define TIF_32BIT 20 /* 32 bit binary */ - - /* as above, but as bit values */ - #define _TIF_SYSCALL_TRACE (1< -Date: Thu, 14 May 2015 17:52:17 +0200 -Subject: [PATCH 246/328] arch/arm64: Add lazy preempt support - -arm64 is missing support for PREEMPT_RT. The main feature which is -lacking is support for lazy preemption. The arch-specific entry code, -thread information structure definitions, and associated data tables -have to be extended to provide this support. Then the Kconfig file has -to be extended to indicate the support is available, and also to -indicate that support for full RT preemption is now available. 
- -Signed-off-by: Anders Roxell ---- - arch/arm64/Kconfig | 1 + - arch/arm64/include/asm/thread_info.h | 6 +++++- - arch/arm64/kernel/asm-offsets.c | 1 + - arch/arm64/kernel/entry.S | 12 +++++++++--- - arch/arm64/kernel/signal.c | 2 +- - 5 files changed, 17 insertions(+), 5 deletions(-) - -diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 51fe21f5d078..9bf5be2d6024 100644 ---- a/arch/arm64/Kconfig -+++ b/arch/arm64/Kconfig -@@ -141,6 +141,7 @@ config ARM64 - select HAVE_PERF_EVENTS - select HAVE_PERF_REGS - select HAVE_PERF_USER_STACK_DUMP -+ select HAVE_PREEMPT_LAZY - select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_RCU_TABLE_FREE - select HAVE_RSEQ -diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h -index cb2c10a8f0a8..f1820f7318b6 100644 ---- a/arch/arm64/include/asm/thread_info.h -+++ b/arch/arm64/include/asm/thread_info.h -@@ -43,6 +43,7 @@ struct thread_info { - u64 ttbr0; /* saved TTBR0_EL1 */ - #endif - int preempt_count; /* 0 => preemptable, <0 => bug */ -+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ - }; - - #define thread_saved_pc(tsk) \ -@@ -76,6 +77,7 @@ void arch_release_task_struct(struct task_struct *tsk); - #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ - #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ - #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ -+#define TIF_NEED_RESCHED_LAZY 6 - #define TIF_NOHZ 7 - #define TIF_SYSCALL_TRACE 8 - #define TIF_SYSCALL_AUDIT 9 -@@ -94,6 +96,7 @@ void arch_release_task_struct(struct task_struct *tsk); - #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) - #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) - #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) - #define _TIF_NOHZ (1 << TIF_NOHZ) - #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) -@@ -106,8 +109,9 @@ void arch_release_task_struct(struct task_struct *tsk); - - #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ -- _TIF_UPROBE | _TIF_FSCHECK) -+ _TIF_UPROBE | _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY) - -+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) - #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ - _TIF_NOHZ) -diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c -index 92fba851ce53..844c71bc865b 100644 ---- a/arch/arm64/kernel/asm-offsets.c -+++ b/arch/arm64/kernel/asm-offsets.c -@@ -41,6 +41,7 @@ int main(void) - BLANK(); - DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); - DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); -+ DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count)); - DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit)); - #ifdef CONFIG_ARM64_SW_TTBR0_PAN - DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); -diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S -index 5f800384cb9a..b582580c8c4c 100644 ---- a/arch/arm64/kernel/entry.S -+++ b/arch/arm64/kernel/entry.S -@@ -623,11 +623,16 @@ el1_irq: - - #ifdef CONFIG_PREEMPT - ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count -- cbnz w24, 1f // preempt count != 0 -+ cbnz w24, 2f // preempt count != 0 - ldr x0, [tsk, #TSK_TI_FLAGS] // get flags -- tbz x0, 
#TIF_NEED_RESCHED, 1f // needs rescheduling? -- bl el1_preempt -+ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? -+ -+ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count -+ cbnz w24, 2f // preempt lazy count != 0 -+ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling? - 1: -+ bl el1_preempt -+2: - #endif - #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on -@@ -641,6 +646,7 @@ el1_preempt: - 1: bl preempt_schedule_irq // irq en/disable is done inside - ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS - tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? -+ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling? - ret x24 - #endif - -diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c -index 5dcc942906db..4fec251fe147 100644 ---- a/arch/arm64/kernel/signal.c -+++ b/arch/arm64/kernel/signal.c -@@ -926,7 +926,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, - /* Check valid user FS if needed */ - addr_limit_user_check(); - -- if (thread_flags & _TIF_NEED_RESCHED) { -+ if (thread_flags & _TIF_NEED_RESCHED_MASK) { - /* Unmask Debug and SError for the next task */ - local_daif_restore(DAIF_PROCCTX_NOIRQ); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch b/kernel/patches-4.19.x-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch deleted file mode 100644 index 0ab146922..000000000 --- a/kernel/patches-4.19.x-rt/0247-connector-cn_proc-Protect-send_msg-with-a-local-lock.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 8d0546a63b1afa99131c556d9f273f8865eaf480 Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Sun, 16 Oct 2016 05:11:54 +0200 -Subject: [PATCH 247/328] connector/cn_proc: Protect send_msg() with a local - lock on RT - -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931 -|in_atomic(): 1, irqs_disabled(): 0, pid: 31807, name: sleep -|Preemption disabled at:[] proc_exit_connector+0xbb/0x140 -| -|CPU: 4 PID: 31807 Comm: sleep Tainted: G W E 4.8.0-rt11-rt #106 -|Call Trace: -| [] dump_stack+0x65/0x88 -| [] ___might_sleep+0xf5/0x180 -| [] __rt_spin_lock+0x20/0x50 -| [] rt_read_lock+0x28/0x30 -| [] netlink_broadcast_filtered+0x49/0x3f0 -| [] ? __kmalloc_reserve.isra.33+0x31/0x90 -| [] netlink_broadcast+0x1d/0x20 -| [] cn_netlink_send_mult+0x19a/0x1f0 -| [] cn_netlink_send+0x1b/0x20 -| [] proc_exit_connector+0xf8/0x140 -| [] do_exit+0x5d1/0xba0 -| [] do_group_exit+0x4c/0xc0 -| [] SyS_exit_group+0x14/0x20 -| [] entry_SYSCALL_64_fastpath+0x1a/0xa4 - -Since ab8ed951080e ("connector: fix out-of-order cn_proc netlink message -delivery") which is v4.7-rc6. - -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/connector/cn_proc.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c -index ad48fd52cb53..c5264b3ee0b0 100644 ---- a/drivers/connector/cn_proc.c -+++ b/drivers/connector/cn_proc.c -@@ -32,6 +32,7 @@ - #include - - #include -+#include - - /* - * Size of a cn_msg followed by a proc_event structure. 
Since the -@@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; - - /* proc_event_counts is used as the sequence number of the netlink message */ - static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 }; -+static DEFINE_LOCAL_IRQ_LOCK(send_msg_lock); - - static inline void send_msg(struct cn_msg *msg) - { -- preempt_disable(); -+ local_lock(send_msg_lock); - - msg->seq = __this_cpu_inc_return(proc_event_counts) - 1; - ((struct proc_event *)msg->data)->cpu = smp_processor_id(); -@@ -70,7 +72,7 @@ static inline void send_msg(struct cn_msg *msg) - */ - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT); - -- preempt_enable(); -+ local_unlock(send_msg_lock); - } - - void proc_fork_connector(struct task_struct *task) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/kernel/patches-4.19.x-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch deleted file mode 100644 index 7c3c4801a..000000000 --- a/kernel/patches-4.19.x-rt/0248-drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch +++ /dev/null @@ -1,103 +0,0 @@ -From 50ea98cbb28042ef353cdb79d52d70d051d45c50 Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Thu, 31 Mar 2016 04:08:28 +0200 -Subject: [PATCH 248/328] drivers/block/zram: Replace bit spinlocks with - rtmutex for -rt - -They're nondeterministic, and lead to ___might_sleep() splats in -rt. -OTOH, they're a lot less wasteful than an rtmutex per page. - -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/block/zram/zram_drv.c | 38 +++++++++++++++++++++++++++++++++++ - drivers/block/zram/zram_drv.h | 3 +++ - 2 files changed, 41 insertions(+) - -diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c -index 76abe40bfa83..d6cf9508b80d 100644 ---- a/drivers/block/zram/zram_drv.c -+++ b/drivers/block/zram/zram_drv.c -@@ -53,6 +53,40 @@ static size_t huge_class_size; - - static void zram_free_page(struct zram *zram, size_t index); - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) -+{ -+ size_t index; -+ -+ for (index = 0; index < num_pages; index++) -+ spin_lock_init(&zram->table[index].lock); -+} -+ -+static int zram_slot_trylock(struct zram *zram, u32 index) -+{ -+ int ret; -+ -+ ret = spin_trylock(&zram->table[index].lock); -+ if (ret) -+ __set_bit(ZRAM_LOCK, &zram->table[index].value); -+ return ret; -+} -+ -+static void zram_slot_lock(struct zram *zram, u32 index) -+{ -+ spin_lock(&zram->table[index].lock); -+ __set_bit(ZRAM_LOCK, &zram->table[index].value); -+} -+ -+static void zram_slot_unlock(struct zram *zram, u32 index) -+{ -+ __clear_bit(ZRAM_LOCK, &zram->table[index].value); -+ spin_unlock(&zram->table[index].lock); -+} -+ -+#else -+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { } -+ - static int zram_slot_trylock(struct zram *zram, u32 index) - { - return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].value); -@@ -67,6 +101,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index) - { - bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value); - } -+#endif - - static inline bool init_done(struct zram *zram) - { -@@ -902,6 +937,8 @@ static DEVICE_ATTR_RO(io_stat); - static DEVICE_ATTR_RO(mm_stat); - static DEVICE_ATTR_RO(debug_stat); - -+ -+ - static void zram_meta_free(struct zram *zram, u64 disksize) - { - size_t num_pages = disksize >> PAGE_SHIFT; -@@ -932,6 +969,7 @@ static bool zram_meta_alloc(struct zram 
*zram, u64 disksize) - - if (!huge_class_size) - huge_class_size = zs_huge_class_size(zram->mem_pool); -+ zram_meta_init_table_locks(zram, num_pages); - return true; - } - -diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h -index d1095dfdffa8..144e91061df8 100644 ---- a/drivers/block/zram/zram_drv.h -+++ b/drivers/block/zram/zram_drv.h -@@ -61,6 +61,9 @@ struct zram_table_entry { - unsigned long element; - }; - unsigned long value; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ spinlock_t lock; -+#endif - #ifdef CONFIG_ZRAM_MEMORY_TRACKING - ktime_t ac_time; - #endif --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/kernel/patches-4.19.x-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch deleted file mode 100644 index 716da21c6..000000000 --- a/kernel/patches-4.19.x-rt/0249-drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch +++ /dev/null @@ -1,106 +0,0 @@ -From 88b91a35eb7f80af3482c12cfcb4ceebca3a88d8 Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Thu, 20 Oct 2016 11:15:22 +0200 -Subject: [PATCH 249/328] drivers/zram: Don't disable preemption in - zcomp_stream_get/put() - -In v4.7, the driver switched to percpu compression streams, disabling -preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. We -also have to fix an lock order issue in zram_decompress_page() such -that zs_map_object() nests inside of zcomp_stream_put() as it does in -zram_bvec_write(). - -Signed-off-by: Mike Galbraith -[bigeasy: get_locked_var() -> per zcomp_strm lock] -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/block/zram/zcomp.c | 12 ++++++++++-- - drivers/block/zram/zcomp.h | 1 + - drivers/block/zram/zram_drv.c | 5 +++-- - 3 files changed, 14 insertions(+), 4 deletions(-) - -diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c -index 4ed0a78fdc09..dd65a27ae2cc 100644 ---- a/drivers/block/zram/zcomp.c -+++ b/drivers/block/zram/zcomp.c -@@ -116,12 +116,19 @@ ssize_t zcomp_available_show(const char *comp, char *buf) - - struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) - { -- return *get_cpu_ptr(comp->stream); -+ struct zcomp_strm *zstrm; -+ -+ zstrm = *this_cpu_ptr(comp->stream); -+ spin_lock(&zstrm->zcomp_lock); -+ return zstrm; - } - - void zcomp_stream_put(struct zcomp *comp) - { -- put_cpu_ptr(comp->stream); -+ struct zcomp_strm *zstrm; -+ -+ zstrm = *this_cpu_ptr(comp->stream); -+ spin_unlock(&zstrm->zcomp_lock); - } - - int zcomp_compress(struct zcomp_strm *zstrm, -@@ -171,6 +178,7 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) - pr_err("Can't allocate a compression stream\n"); - return -ENOMEM; - } -+ spin_lock_init(&zstrm->zcomp_lock); - *per_cpu_ptr(comp->stream, cpu) = zstrm; - return 0; - } -diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h -index 41c1002a7d7d..d424eafcbf8e 100644 ---- a/drivers/block/zram/zcomp.h -+++ b/drivers/block/zram/zcomp.h -@@ -14,6 +14,7 @@ struct zcomp_strm { - /* compression/decompression buffer */ - void *buffer; - struct crypto_comp *tfm; -+ spinlock_t zcomp_lock; - }; - - /* dynamic per-device compression frontend */ -diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c -index d6cf9508b80d..71520199226a 100644 ---- a/drivers/block/zram/zram_drv.c -+++ b/drivers/block/zram/zram_drv.c -@@ -1028,6 +1028,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, - unsigned long handle; - unsigned int size; - void *src, *dst; -+ struct 
zcomp_strm *zstrm; - - if (zram_wb_enabled(zram)) { - zram_slot_lock(zram, index); -@@ -1062,6 +1063,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, - - size = zram_get_obj_size(zram, index); - -+ zstrm = zcomp_stream_get(zram->comp); - src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); - if (size == PAGE_SIZE) { - dst = kmap_atomic(page); -@@ -1069,14 +1071,13 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, - kunmap_atomic(dst); - ret = 0; - } else { -- struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); - - dst = kmap_atomic(page); - ret = zcomp_decompress(zstrm, src, size, dst); - kunmap_atomic(dst); -- zcomp_stream_put(zram->comp); - } - zs_unmap_object(zram->mem_pool, handle); -+ zcomp_stream_put(zram->comp); - zram_slot_unlock(zram, index); - - /* Should NEVER happen. Return bio error if it does. */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch b/kernel/patches-4.19.x-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch deleted file mode 100644 index 172df52d8..000000000 --- a/kernel/patches-4.19.x-rt/0250-drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 4423c47dc0bff765fc58fb223ef9d4cd3b23c634 Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Wed, 23 Aug 2017 11:57:29 +0200 -Subject: [PATCH 250/328] drivers/zram: fix zcomp_stream_get() - smp_processor_id() use in preemptible code - -Use get_local_ptr() instead this_cpu_ptr() to avoid a warning regarding -smp_processor_id() in preemptible code. -raw_cpu_ptr() would be fine, too because the per-CPU data structure is -protected with a spin lock so it does not matter much if we take the -other one. - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/block/zram/zcomp.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c -index dd65a27ae2cc..eece02262000 100644 ---- a/drivers/block/zram/zcomp.c -+++ b/drivers/block/zram/zcomp.c -@@ -118,7 +118,7 @@ struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) - { - struct zcomp_strm *zstrm; - -- zstrm = *this_cpu_ptr(comp->stream); -+ zstrm = *get_local_ptr(comp->stream); - spin_lock(&zstrm->zcomp_lock); - return zstrm; - } -@@ -129,6 +129,7 @@ void zcomp_stream_put(struct zcomp *comp) - - zstrm = *this_cpu_ptr(comp->stream); - spin_unlock(&zstrm->zcomp_lock); -+ put_local_ptr(zstrm); - } - - int zcomp_compress(struct zcomp_strm *zstrm, --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch b/kernel/patches-4.19.x-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch deleted file mode 100644 index 352a39f62..000000000 --- a/kernel/patches-4.19.x-rt/0251-tpm_tis-fix-stall-after-iowrite-s.patch +++ /dev/null @@ -1,83 +0,0 @@ -From fb812057ed5c5dbdbdd983f9d261dcce58bc7376 Mon Sep 17 00:00:00 2001 -From: Haris Okanovic -Date: Tue, 15 Aug 2017 15:13:08 -0500 -Subject: [PATCH 251/328] tpm_tis: fix stall after iowrite*()s - -ioread8() operations to TPM MMIO addresses can stall the cpu when -immediately following a sequence of iowrite*()'s to the same region. - -For example, cyclitest measures ~400us latency spikes when a non-RT -usermode application communicates with an SPI-based TPM chip (Intel Atom -E3940 system, PREEMPT_RT_FULL kernel). 
The spikes are caused by a -stalling ioread8() operation following a sequence of 30+ iowrite8()s to -the same address. I believe this happens because the write sequence is -buffered (in cpu or somewhere along the bus), and gets flushed on the -first LOAD instruction (ioread*()) that follows. - -The enclosed change appears to fix this issue: read the TPM chip's -access register (status code) after every iowrite*() operation to -amortize the cost of flushing data to chip across multiple instructions. - -Signed-off-by: Haris Okanovic -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/char/tpm/tpm_tis.c | 29 +++++++++++++++++++++++++++-- - 1 file changed, 27 insertions(+), 2 deletions(-) - -diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c -index f08949a5f678..9fefcfcae593 100644 ---- a/drivers/char/tpm/tpm_tis.c -+++ b/drivers/char/tpm/tpm_tis.c -@@ -53,6 +53,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da - return container_of(data, struct tpm_tis_tcg_phy, priv); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * Flushes previous write operations to chip so that a subsequent -+ * ioread*()s won't stall a cpu. -+ */ -+static inline void tpm_tis_flush(void __iomem *iobase) -+{ -+ ioread8(iobase + TPM_ACCESS(0)); -+} -+#else -+#define tpm_tis_flush(iobase) do { } while (0) -+#endif -+ -+static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr) -+{ -+ iowrite8(b, iobase + addr); -+ tpm_tis_flush(iobase); -+} -+ -+static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr) -+{ -+ iowrite32(b, iobase + addr); -+ tpm_tis_flush(iobase); -+} -+ - static bool interrupts = true; - module_param(interrupts, bool, 0444); - MODULE_PARM_DESC(interrupts, "Enable interrupts"); -@@ -150,7 +175,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, - struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - - while (len--) -- iowrite8(*value++, phy->iobase + addr); -+ tpm_tis_iowrite8(*value++, phy->iobase, addr); - - return 0; - } -@@ -177,7 +202,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) - { - struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - -- iowrite32(value, phy->iobase + addr); -+ tpm_tis_iowrite32(value, phy->iobase, addr); - - return 0; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch b/kernel/patches-4.19.x-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch deleted file mode 100644 index 276e7186a..000000000 --- a/kernel/patches-4.19.x-rt/0252-watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch +++ /dev/null @@ -1,80 +0,0 @@ -From e0da41ef4940b26cb433c9598a4940e2476d3dbe Mon Sep 17 00:00:00 2001 -From: Julia Cartwright -Date: Fri, 28 Sep 2018 21:03:51 +0000 -Subject: [PATCH 252/328] watchdog: prevent deferral of watchdogd wakeup on RT - -When PREEMPT_RT_FULL is enabled, all hrtimer expiry functions are -deferred for execution into the context of ktimersoftd unless otherwise -annotated. - -Deferring the expiry of the hrtimer used by the watchdog core, however, -is a waste, as the callback does nothing but queue a kthread work item -and wakeup watchdogd. 
- -It's worst then that, too: the deferral through ktimersoftd also means -that for correct behavior a user must adjust the scheduling parameters -of both watchdogd _and_ ktimersoftd, which is unnecessary and has other -side effects (like causing unrelated expiry functions to execute at -potentially elevated priority). - -Instead, mark the hrtimer used by the watchdog core as being _HARD to -allow it's execution directly from hardirq context. The work done in -this expiry function is well-bounded and minimal. - -A user still must adjust the scheduling parameters of the watchdogd -to be correct w.r.t. their application needs. - -Cc: Guenter Roeck -Reported-and-tested-by: Steffen Trumtrar -Reported-by: Tim Sander -Signed-off-by: Julia Cartwright -Acked-by: Guenter Roeck -[bigeasy: use only HRTIMER_MODE_REL_HARD] -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/watchdog/watchdog_dev.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c -index e64aa88e99da..11ba2c3b187f 100644 ---- a/drivers/watchdog/watchdog_dev.c -+++ b/drivers/watchdog/watchdog_dev.c -@@ -145,7 +145,7 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd) - ktime_t t = watchdog_next_keepalive(wdd); - - if (t > 0) -- hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL); -+ hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL_HARD); - } else { - hrtimer_cancel(&wd_data->timer); - } -@@ -164,7 +164,7 @@ static int __watchdog_ping(struct watchdog_device *wdd) - if (ktime_after(earliest_keepalive, now)) { - hrtimer_start(&wd_data->timer, - ktime_sub(earliest_keepalive, now), -- HRTIMER_MODE_REL); -+ HRTIMER_MODE_REL_HARD); - return 0; - } - -@@ -947,7 +947,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) - return -ENODEV; - - kthread_init_work(&wd_data->work, watchdog_ping_work); -- hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); - wd_data->timer.function = watchdog_timer_expired; - - if (wdd->id == 0) { -@@ -1004,7 +1004,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) - __module_get(wdd->ops->owner); - get_device(&wd_data->dev); - if (handle_boot_enabled) -- hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL); -+ hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL_HARD); - else - pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n", - wdd->id); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch b/kernel/patches-4.19.x-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch deleted file mode 100644 index 264e44f6b..000000000 --- a/kernel/patches-4.19.x-rt/0253-drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch +++ /dev/null @@ -1,60 +0,0 @@ -From e3f6a9293311449cca5306673eae21802410ed18 Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Sat, 27 Feb 2016 08:09:11 +0100 -Subject: [PATCH 253/328] drm,radeon,i915: Use preempt_disable/enable_rt() - where recommended - -DRM folks identified the spots, so use them. 
- -Signed-off-by: Mike Galbraith -Cc: Sebastian Andrzej Siewior -Cc: linux-rt-users -Signed-off-by: Thomas Gleixner ---- - drivers/gpu/drm/i915/i915_irq.c | 2 ++ - drivers/gpu/drm/radeon/radeon_display.c | 2 ++ - 2 files changed, 4 insertions(+) - -diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c -index 29877969310d..f65817c51c2a 100644 ---- a/drivers/gpu/drm/i915/i915_irq.c -+++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -1025,6 +1025,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ -+ preempt_disable_rt(); - - /* Get optional system timestamp before query. */ - if (stime) -@@ -1076,6 +1077,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, - *etime = ktime_get(); - - /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ -+ preempt_enable_rt(); - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); - -diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c -index 7d1e14f0140a..bcc3456749d6 100644 ---- a/drivers/gpu/drm/radeon/radeon_display.c -+++ b/drivers/gpu/drm/radeon/radeon_display.c -@@ -1815,6 +1815,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, - struct radeon_device *rdev = dev->dev_private; - - /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ -+ preempt_disable_rt(); - - /* Get optional system timestamp before query. */ - if (stime) -@@ -1907,6 +1908,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, - *etime = ktime_get(); - - /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ -+ preempt_enable_rt(); - - /* Decode into vertical and horizontal scanout position. */ - *vpos = position & 0x1fff; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch b/kernel/patches-4.19.x-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch deleted file mode 100644 index a418352b9..000000000 --- a/kernel/patches-4.19.x-rt/0254-drm-i915-Use-local_lock-unlock_irq-in-intel_pipe_upd.patch +++ /dev/null @@ -1,127 +0,0 @@ -From ae86e298bae75579cf4bc0009251aad847d4d4a2 Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Sat, 27 Feb 2016 09:01:42 +0100 -Subject: [PATCH 254/328] drm,i915: Use local_lock/unlock_irq() in - intel_pipe_update_start/end() - -[ 8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918 -[ 8.014041] in_atomic(): 0, irqs_disabled(): 1, pid: 78, name: kworker/u4:4 -[ 8.014045] CPU: 1 PID: 78 Comm: kworker/u4:4 Not tainted 4.1.7-rt7 #5 -[ 8.014055] Workqueue: events_unbound async_run_entry_fn -[ 8.014059] 0000000000000000 ffff880037153748 ffffffff815f32c9 0000000000000002 -[ 8.014063] ffff88013a50e380 ffff880037153768 ffffffff815ef075 ffff8800372c06c8 -[ 8.014066] ffff8800372c06c8 ffff880037153778 ffffffff8107c0b3 ffff880037153798 -[ 8.014067] Call Trace: -[ 8.014074] [] dump_stack+0x4a/0x61 -[ 8.014078] [] ___might_sleep.part.93+0xe9/0xee -[ 8.014082] [] ___might_sleep+0x53/0x80 -[ 8.014086] [] rt_spin_lock+0x24/0x50 -[ 8.014090] [] prepare_to_wait+0x2b/0xa0 -[ 8.014152] [] intel_pipe_update_start+0x17c/0x300 [i915] -[ 8.014156] [] ? 
prepare_to_wait_event+0x120/0x120 -[ 8.014201] [] intel_begin_crtc_commit+0x166/0x1e0 [i915] -[ 8.014215] [] drm_atomic_helper_commit_planes+0x5d/0x1a0 [drm_kms_helper] -[ 8.014260] [] intel_atomic_commit+0xab/0xf0 [i915] -[ 8.014288] [] drm_atomic_commit+0x37/0x60 [drm] -[ 8.014298] [] drm_atomic_helper_plane_set_property+0x8d/0xd0 [drm_kms_helper] -[ 8.014301] [] ? __ww_mutex_lock+0x39/0x40 -[ 8.014319] [] drm_mode_plane_set_obj_prop+0x2d/0x90 [drm] -[ 8.014328] [] restore_fbdev_mode+0x6b/0xf0 [drm_kms_helper] -[ 8.014337] [] drm_fb_helper_restore_fbdev_mode_unlocked+0x29/0x80 [drm_kms_helper] -[ 8.014346] [] drm_fb_helper_set_par+0x22/0x50 [drm_kms_helper] -[ 8.014390] [] intel_fbdev_set_par+0x1a/0x60 [i915] -[ 8.014394] [] fbcon_init+0x4f4/0x580 -[ 8.014398] [] visual_init+0xbc/0x120 -[ 8.014401] [] do_bind_con_driver+0x163/0x330 -[ 8.014405] [] do_take_over_console+0x11c/0x1c0 -[ 8.014408] [] do_fbcon_takeover+0x63/0xd0 -[ 8.014410] [] fbcon_event_notify+0x785/0x8d0 -[ 8.014413] [] ? __might_sleep+0x4d/0x90 -[ 8.014416] [] notifier_call_chain+0x4e/0x80 -[ 8.014419] [] __blocking_notifier_call_chain+0x4d/0x70 -[ 8.014422] [] blocking_notifier_call_chain+0x16/0x20 -[ 8.014425] [] fb_notifier_call_chain+0x1b/0x20 -[ 8.014428] [] register_framebuffer+0x21a/0x350 -[ 8.014439] [] drm_fb_helper_initial_config+0x274/0x3e0 [drm_kms_helper] -[ 8.014483] [] intel_fbdev_initial_config+0x1b/0x20 [i915] -[ 8.014486] [] async_run_entry_fn+0x4c/0x160 -[ 8.014490] [] process_one_work+0x14a/0x470 -[ 8.014493] [] worker_thread+0x169/0x4c0 -[ 8.014496] [] ? process_one_work+0x470/0x470 -[ 8.014499] [] kthread+0xc6/0xe0 -[ 8.014502] [] ? queue_work_on+0x80/0x110 -[ 8.014506] [] ? kthread_worker_fn+0x1c0/0x1c0 - -Signed-off-by: Mike Galbraith -Cc: Sebastian Andrzej Siewior -Cc: linux-rt-users -Signed-off-by: Thomas Gleixner ---- - drivers/gpu/drm/i915/intel_sprite.c | 13 ++++++++----- - 1 file changed, 8 insertions(+), 5 deletions(-) - -diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c -index f7026e887fa9..07e4ddebdd80 100644 ---- a/drivers/gpu/drm/i915/intel_sprite.c -+++ b/drivers/gpu/drm/i915/intel_sprite.c -@@ -36,6 +36,7 @@ - #include - #include - #include -+#include - #include "intel_drv.h" - #include "intel_frontbuffer.h" - #include -@@ -60,6 +61,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, - #define VBLANK_EVASION_TIME_US 100 - #endif - -+static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock); -+ - /** - * intel_pipe_update_start() - start update of a set of display registers - * @new_crtc_state: the new crtc state -@@ -107,7 +110,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) - if (intel_psr_wait_for_idle(new_crtc_state)) - DRM_ERROR("PSR idle timed out, atomic update may fail\n"); - -- local_irq_disable(); -+ local_lock_irq(pipe_update_lock); - - crtc->debug.min_vbl = min; - crtc->debug.max_vbl = max; -@@ -131,11 +134,11 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) - break; - } - -- local_irq_enable(); -+ local_unlock_irq(pipe_update_lock); - - timeout = schedule_timeout(timeout); - -- local_irq_disable(); -+ local_lock_irq(pipe_update_lock); - } - - finish_wait(wq, &wait); -@@ -168,7 +171,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) - return; - - irq_disable: -- local_irq_disable(); -+ local_lock_irq(pipe_update_lock); - } - - /** -@@ -204,7 +207,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) - 
new_crtc_state->base.event = NULL; - } - -- local_irq_enable(); -+ local_unlock_irq(pipe_update_lock); - - if (intel_vgpu_active(dev_priv)) - return; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0255-drm-i915-disable-tracing-on-RT.patch b/kernel/patches-4.19.x-rt/0255-drm-i915-disable-tracing-on-RT.patch deleted file mode 100644 index ef806ed09..000000000 --- a/kernel/patches-4.19.x-rt/0255-drm-i915-disable-tracing-on-RT.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 3aed1fe6522f79b259e8106ff18e0c8324020675 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 6 Dec 2018 09:52:20 +0100 -Subject: [PATCH 255/328] drm/i915: disable tracing on -RT - -Luca Abeni reported this: -| BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003 -| CPU: 1 PID: 15203 Comm: kworker/u8:2 Not tainted 4.19.1-rt3 #10 -| Call Trace: -| rt_spin_lock+0x3f/0x50 -| gen6_read32+0x45/0x1d0 [i915] -| g4x_get_vblank_counter+0x36/0x40 [i915] -| trace_event_raw_event_i915_pipe_update_start+0x7d/0xf0 [i915] - -The tracing events use trace_i915_pipe_update_start() among other events -use functions acquire spin locks. A few trace points use -intel_get_crtc_scanline(), others use ->get_vblank_counter() wich also -might acquire a sleeping lock. - -Based on this I don't see any other way than disable trace points on RT. - -Cc: stable-rt@vger.kernel.org -Reported-by: Luca Abeni -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/gpu/drm/i915/i915_trace.h | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h -index b50c6b829715..cc54ec0ef75c 100644 ---- a/drivers/gpu/drm/i915/i915_trace.h -+++ b/drivers/gpu/drm/i915/i915_trace.h -@@ -2,6 +2,10 @@ - #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) - #define _I915_TRACE_H_ - -+#ifdef CONFIG_PREEMPT_RT_BASE -+#define NOTRACE -+#endif -+ - #include - #include - #include --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/kernel/patches-4.19.x-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch deleted file mode 100644 index d9e790b0e..000000000 --- a/kernel/patches-4.19.x-rt/0256-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 3d4557d91951a9964b97c806777b13cca8e11dee Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 19 Dec 2018 10:47:02 +0100 -Subject: [PATCH 256/328] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with - NOTRACE - -The order of the header files is important. If this header file is -included after tracepoint.h was included then the NOTRACE here becomes a -nop. Currently this happens for two .c files which use the tracepoitns -behind DRM_I915_LOW_LEVEL_TRACEPOINTS. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/gpu/drm/i915/i915_trace.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h -index cc54ec0ef75c..33028d8f470e 100644 ---- a/drivers/gpu/drm/i915/i915_trace.h -+++ b/drivers/gpu/drm/i915/i915_trace.h -@@ -683,7 +683,7 @@ DEFINE_EVENT(i915_request, i915_request_add, - TP_ARGS(rq) - ); - --#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) -+#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE) - DEFINE_EVENT(i915_request, i915_request_submit, - TP_PROTO(struct i915_request *rq), - TP_ARGS(rq) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0257-cgroups-use-simple-wait-in-css_release.patch b/kernel/patches-4.19.x-rt/0257-cgroups-use-simple-wait-in-css_release.patch deleted file mode 100644 index e212d96b1..000000000 --- a/kernel/patches-4.19.x-rt/0257-cgroups-use-simple-wait-in-css_release.patch +++ /dev/null @@ -1,94 +0,0 @@ -From 33ef8c81b2c33d47247da6b0b5d534c41dd7359a Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 13 Feb 2015 15:52:24 +0100 -Subject: [PATCH 257/328] cgroups: use simple wait in css_release() - -To avoid: -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 -|in_atomic(): 1, irqs_disabled(): 0, pid: 92, name: rcuc/11 -|2 locks held by rcuc/11/92: -| #0: (rcu_callback){......}, at: [] rcu_cpu_kthread+0x3de/0x940 -| #1: (rcu_read_lock_sched){......}, at: [] percpu_ref_call_confirm_rcu+0x0/0xd0 -|Preemption disabled at:[] percpu_ref_switch_to_atomic_rcu+0x82/0xc0 -|CPU: 11 PID: 92 Comm: rcuc/11 Not tainted 3.18.7-rt0+ #1 -| ffff8802398cdf80 ffff880235f0bc28 ffffffff815b3a12 0000000000000000 -| 0000000000000000 ffff880235f0bc48 ffffffff8109aa16 0000000000000000 -| ffff8802398cdf80 ffff880235f0bc78 ffffffff815b8dd4 000000000000df80 -|Call Trace: -| [] dump_stack+0x4f/0x7c -| [] __might_sleep+0x116/0x190 -| [] rt_spin_lock+0x24/0x60 -| [] queue_work_on+0x6d/0x1d0 -| [] css_release+0x81/0x90 -| [] percpu_ref_call_confirm_rcu+0xbe/0xd0 -| [] percpu_ref_switch_to_atomic_rcu+0x82/0xc0 -| [] rcu_cpu_kthread+0x445/0x940 -| [] smpboot_thread_fn+0x18d/0x2d0 -| [] kthread+0xe8/0x100 -| [] ret_from_fork+0x7c/0xb0 - -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/cgroup-defs.h | 2 ++ - kernel/cgroup/cgroup.c | 9 +++++---- - 2 files changed, 7 insertions(+), 4 deletions(-) - -diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h -index a01ebb630abc..34fb541e90be 100644 ---- a/include/linux/cgroup-defs.h -+++ b/include/linux/cgroup-defs.h -@@ -20,6 +20,7 @@ - #include - #include - #include -+#include - - #ifdef CONFIG_CGROUPS - -@@ -157,6 +158,7 @@ struct cgroup_subsys_state { - - /* percpu_ref killing and RCU release */ - struct work_struct destroy_work; -+ struct swork_event destroy_swork; - struct rcu_work destroy_rwork; - - /* -diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c -index 08bd40d90066..aceac97f78f4 100644 ---- a/kernel/cgroup/cgroup.c -+++ b/kernel/cgroup/cgroup.c -@@ -4667,10 +4667,10 @@ static void css_free_rwork_fn(struct work_struct *work) - } - } - --static void css_release_work_fn(struct work_struct *work) -+static void css_release_work_fn(struct swork_event *sev) - { - struct cgroup_subsys_state *css = -- container_of(work, struct cgroup_subsys_state, destroy_work); -+ container_of(sev, struct cgroup_subsys_state, destroy_swork); - struct cgroup_subsys *ss = css->ss; - struct cgroup *cgrp = css->cgroup; - 
-@@ -4732,8 +4732,8 @@ static void css_release(struct percpu_ref *ref) - struct cgroup_subsys_state *css = - container_of(ref, struct cgroup_subsys_state, refcnt); - -- INIT_WORK(&css->destroy_work, css_release_work_fn); -- queue_work(cgroup_destroy_wq, &css->destroy_work); -+ INIT_SWORK(&css->destroy_swork, css_release_work_fn); -+ swork_queue(&css->destroy_swork); - } - - static void init_and_link_css(struct cgroup_subsys_state *css, -@@ -5459,6 +5459,7 @@ static int __init cgroup_wq_init(void) - */ - cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); - BUG_ON(!cgroup_destroy_wq); -+ BUG_ON(swork_get()); - return 0; - } - core_initcall(cgroup_wq_init); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/kernel/patches-4.19.x-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch deleted file mode 100644 index 4ff73a31f..000000000 --- a/kernel/patches-4.19.x-rt/0258-cpuset-Convert-callback_lock-to-raw_spinlock_t.patch +++ /dev/null @@ -1,292 +0,0 @@ -From 86e3ee73627056c1c97053b2201b942bfad09b26 Mon Sep 17 00:00:00 2001 -From: Mike Galbraith -Date: Sun, 8 Jan 2017 09:32:25 +0100 -Subject: [PATCH 258/328] cpuset: Convert callback_lock to raw_spinlock_t - -The two commits below add up to a cpuset might_sleep() splat for RT: - -8447a0fee974 cpuset: convert callback_mutex to a spinlock -344736f29b35 cpuset: simplify cpuset_node_allowed API - -BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:995 -in_atomic(): 0, irqs_disabled(): 1, pid: 11718, name: cset -CPU: 135 PID: 11718 Comm: cset Tainted: G E 4.10.0-rt1-rt #4 -Hardware name: Intel Corporation BRICKLAND/BRICKLAND, BIOS BRHSXSD1.86B.0056.R01.1409242327 09/24/2014 -Call Trace: - ? dump_stack+0x5c/0x81 - ? ___might_sleep+0xf4/0x170 - ? rt_spin_lock+0x1c/0x50 - ? __cpuset_node_allowed+0x66/0xc0 - ? ___slab_alloc+0x390/0x570 - ? anon_vma_fork+0x8f/0x140 - ? copy_page_range+0x6cf/0xb00 - ? anon_vma_fork+0x8f/0x140 - ? __slab_alloc.isra.74+0x5a/0x81 - ? anon_vma_fork+0x8f/0x140 - ? kmem_cache_alloc+0x1b5/0x1f0 - ? anon_vma_fork+0x8f/0x140 - ? copy_process.part.35+0x1670/0x1ee0 - ? _do_fork+0xdd/0x3f0 - ? _do_fork+0xdd/0x3f0 - ? do_syscall_64+0x61/0x170 - ? entry_SYSCALL64_slow_path+0x25/0x25 - -The later ensured that a NUMA box WILL take callback_lock in atomic -context by removing the allocator and reclaim path __GFP_HARDWALL -usage which prevented such contexts from taking callback_mutex. - -One option would be to reinstate __GFP_HARDWALL protections for -RT, however, as the 8447a0fee974 changelog states: - -The callback_mutex is only used to synchronize reads/updates of cpusets' -flags and cpu/node masks. These operations should always proceed fast so -there's no reason why we can't use a spinlock instead of the mutex. 
- -Cc: stable-rt@vger.kernel.org -Signed-off-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/cgroup/cpuset.c | 66 +++++++++++++++++++++--------------------- - 1 file changed, 33 insertions(+), 33 deletions(-) - -diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c -index 7bb129c5b412..92575cb9b493 100644 ---- a/kernel/cgroup/cpuset.c -+++ b/kernel/cgroup/cpuset.c -@@ -288,7 +288,7 @@ static struct cpuset top_cpuset = { - */ - - static DEFINE_MUTEX(cpuset_mutex); --static DEFINE_SPINLOCK(callback_lock); -+static DEFINE_RAW_SPINLOCK(callback_lock); - - static struct workqueue_struct *cpuset_migrate_mm_wq; - -@@ -922,9 +922,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) - continue; - rcu_read_unlock(); - -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - cpumask_copy(cp->effective_cpus, new_cpus); -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - - WARN_ON(!is_in_v2_mode() && - !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); -@@ -989,9 +989,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, - if (retval < 0) - return retval; - -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - - /* use trialcs->cpus_allowed as a temp variable */ - update_cpumasks_hier(cs, trialcs->cpus_allowed); -@@ -1175,9 +1175,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) - continue; - rcu_read_unlock(); - -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - cp->effective_mems = *new_mems; -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - - WARN_ON(!is_in_v2_mode() && - !nodes_equal(cp->mems_allowed, cp->effective_mems)); -@@ -1245,9 +1245,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, - if (retval < 0) - goto done; - -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - cs->mems_allowed = trialcs->mems_allowed; -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - - /* use trialcs->mems_allowed as a temp variable */ - update_nodemasks_hier(cs, &trialcs->mems_allowed); -@@ -1338,9 +1338,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, - spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) - || (is_spread_page(cs) != is_spread_page(trialcs))); - -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - cs->flags = trialcs->flags; -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - - if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) - rebuild_sched_domains_locked(); -@@ -1755,7 +1755,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) - cpuset_filetype_t type = seq_cft(sf)->private; - int ret = 0; - -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - - switch (type) { - case FILE_CPULIST: -@@ -1774,7 +1774,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) - ret = -EINVAL; - } - -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - return ret; - } - -@@ -1989,12 +1989,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) - - cpuset_inc(); - -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - if (is_in_v2_mode()) { - cpumask_copy(cs->effective_cpus, 
parent->effective_cpus); - cs->effective_mems = parent->effective_mems; - } -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - - if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) - goto out_unlock; -@@ -2021,12 +2021,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) - } - rcu_read_unlock(); - -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - cs->mems_allowed = parent->mems_allowed; - cs->effective_mems = parent->mems_allowed; - cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); - cpumask_copy(cs->effective_cpus, parent->cpus_allowed); -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - out_unlock: - mutex_unlock(&cpuset_mutex); - return 0; -@@ -2065,7 +2065,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) - static void cpuset_bind(struct cgroup_subsys_state *root_css) - { - mutex_lock(&cpuset_mutex); -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - - if (is_in_v2_mode()) { - cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); -@@ -2076,7 +2076,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) - top_cpuset.mems_allowed = top_cpuset.effective_mems; - } - -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - mutex_unlock(&cpuset_mutex); - } - -@@ -2174,12 +2174,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs, - { - bool is_empty; - -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - cpumask_copy(cs->cpus_allowed, new_cpus); - cpumask_copy(cs->effective_cpus, new_cpus); - cs->mems_allowed = *new_mems; - cs->effective_mems = *new_mems; -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - - /* - * Don't call update_tasks_cpumask() if the cpuset becomes empty, -@@ -2216,10 +2216,10 @@ hotplug_update_tasks(struct cpuset *cs, - if (nodes_empty(*new_mems)) - *new_mems = parent_cs(cs)->effective_mems; - -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - cpumask_copy(cs->effective_cpus, new_cpus); - cs->effective_mems = *new_mems; -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - - if (cpus_updated) - update_tasks_cpumask(cs); -@@ -2312,21 +2312,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work) - - /* synchronize cpus_allowed to cpu_active_mask */ - if (cpus_updated) { -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - if (!on_dfl) - cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); - cpumask_copy(top_cpuset.effective_cpus, &new_cpus); -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - /* we don't mess with cpumasks of tasks in top_cpuset */ - } - - /* synchronize mems_allowed to N_MEMORY */ - if (mems_updated) { -- spin_lock_irq(&callback_lock); -+ raw_spin_lock_irq(&callback_lock); - if (!on_dfl) - top_cpuset.mems_allowed = new_mems; - top_cpuset.effective_mems = new_mems; -- spin_unlock_irq(&callback_lock); -+ raw_spin_unlock_irq(&callback_lock); - update_tasks_nodemask(&top_cpuset); - } - -@@ -2425,11 +2425,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) - { - unsigned long flags; - -- spin_lock_irqsave(&callback_lock, flags); -+ raw_spin_lock_irqsave(&callback_lock, flags); - rcu_read_lock(); - guarantee_online_cpus(task_cs(tsk), pmask); - rcu_read_unlock(); -- spin_unlock_irqrestore(&callback_lock, flags); -+ raw_spin_unlock_irqrestore(&callback_lock, flags); - } - - /** -@@ -2490,11 +2490,11 @@ 
nodemask_t cpuset_mems_allowed(struct task_struct *tsk) - nodemask_t mask; - unsigned long flags; - -- spin_lock_irqsave(&callback_lock, flags); -+ raw_spin_lock_irqsave(&callback_lock, flags); - rcu_read_lock(); - guarantee_online_mems(task_cs(tsk), &mask); - rcu_read_unlock(); -- spin_unlock_irqrestore(&callback_lock, flags); -+ raw_spin_unlock_irqrestore(&callback_lock, flags); - - return mask; - } -@@ -2586,14 +2586,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask) - return true; - - /* Not hardwall and node outside mems_allowed: scan up cpusets */ -- spin_lock_irqsave(&callback_lock, flags); -+ raw_spin_lock_irqsave(&callback_lock, flags); - - rcu_read_lock(); - cs = nearest_hardwall_ancestor(task_cs(current)); - allowed = node_isset(node, cs->mems_allowed); - rcu_read_unlock(); - -- spin_unlock_irqrestore(&callback_lock, flags); -+ raw_spin_unlock_irqrestore(&callback_lock, flags); - return allowed; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch b/kernel/patches-4.19.x-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch deleted file mode 100644 index bdb3c4af7..000000000 --- a/kernel/patches-4.19.x-rt/0259-apparmor-use-a-locallock-instead-preempt_disable.patch +++ /dev/null @@ -1,84 +0,0 @@ -From 7d1cc83845fbfeae6cbe51a05849c8934e40bc56 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 11 Oct 2017 17:43:49 +0200 -Subject: [PATCH 259/328] apparmor: use a locallock instead preempt_disable() - -get_buffers() disables preemption which acts as a lock for the per-CPU -variable. Since we can't disable preemption here on RT, a local_lock is -lock is used in order to remain on the same CPU and not to have more -than one user within the critical section. - -Signed-off-by: Sebastian Andrzej Siewior ---- - security/apparmor/include/path.h | 19 ++++++++++++++++--- - security/apparmor/lsm.c | 2 +- - 2 files changed, 17 insertions(+), 4 deletions(-) - -diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h -index b6380c5f0097..12abfddb19c9 100644 ---- a/security/apparmor/include/path.h -+++ b/security/apparmor/include/path.h -@@ -40,8 +40,10 @@ struct aa_buffers { - - #include - #include -+#include - - DECLARE_PER_CPU(struct aa_buffers, aa_buffers); -+DECLARE_LOCAL_IRQ_LOCK(aa_buffers_lock); - - #define ASSIGN(FN, A, X, N) ((X) = FN(A, N)) - #define EVAL1(FN, A, X) ASSIGN(FN, A, X, 0) /*X = FN(0)*/ -@@ -51,7 +53,17 @@ DECLARE_PER_CPU(struct aa_buffers, aa_buffers); - - #define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++) - --#ifdef CONFIG_DEBUG_PREEMPT -+#ifdef CONFIG_PREEMPT_RT_BASE -+static inline void AA_BUG_PREEMPT_ENABLED(const char *s) -+{ -+ struct local_irq_lock *lv; -+ -+ lv = this_cpu_ptr(&aa_buffers_lock); -+ WARN_ONCE(lv->owner != current, -+ "__get_buffer without aa_buffers_lock\n"); -+} -+ -+#elif defined(CONFIG_DEBUG_PREEMPT) - #define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X) - #else - #define AA_BUG_PREEMPT_ENABLED(X) /* nop */ -@@ -67,14 +79,15 @@ DECLARE_PER_CPU(struct aa_buffers, aa_buffers); - - #define get_buffers(X...) \ - do { \ -- struct aa_buffers *__cpu_var = get_cpu_ptr(&aa_buffers); \ -+ struct aa_buffers *__cpu_var; \ -+ __cpu_var = get_locked_ptr(aa_buffers_lock, &aa_buffers); \ - __get_buffers(__cpu_var, X); \ - } while (0) - - #define put_buffers(X, Y...) 
\ - do { \ - __put_buffers(X, Y); \ -- put_cpu_ptr(&aa_buffers); \ -+ put_locked_ptr(aa_buffers_lock, &aa_buffers); \ - } while (0) - - #endif /* __AA_PATH_H */ -diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c -index 730de4638b4e..edc911ff6a4d 100644 ---- a/security/apparmor/lsm.c -+++ b/security/apparmor/lsm.c -@@ -45,7 +45,7 @@ - int apparmor_initialized; - - DEFINE_PER_CPU(struct aa_buffers, aa_buffers); -- -+DEFINE_LOCAL_IRQ_LOCK(aa_buffers_lock); - - /* - * LSM hook functions --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch b/kernel/patches-4.19.x-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch deleted file mode 100644 index 42dbae35e..000000000 --- a/kernel/patches-4.19.x-rt/0260-workqueue-Prevent-deadlock-stall-on-RT.patch +++ /dev/null @@ -1,206 +0,0 @@ -From 3a793fb8d0b6d6f02988068bdba2a88b42d6c9c7 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 27 Jun 2014 16:24:52 +0200 -Subject: [PATCH 260/328] workqueue: Prevent deadlock/stall on RT - -Austin reported a XFS deadlock/stall on RT where scheduled work gets -never exececuted and tasks are waiting for each other for ever. - -The underlying problem is the modification of the RT code to the -handling of workers which are about to go to sleep. In mainline a -worker thread which goes to sleep wakes an idle worker if there is -more work to do. This happens from the guts of the schedule() -function. On RT this must be outside and the accessed data structures -are not protected against scheduling due to the spinlock to rtmutex -conversion. So the naive solution to this was to move the code outside -of the scheduler and protect the data structures by the pool -lock. That approach turned out to be a little naive as we cannot call -into that code when the thread blocks on a lock, as it is not allowed -to block on two locks in parallel. So we dont call into the worker -wakeup magic when the worker is blocked on a lock, which causes the -deadlock/stall observed by Austin and Mike. - -Looking deeper into that worker code it turns out that the only -relevant data structure which needs to be protected is the list of -idle workers which can be woken up. - -So the solution is to protect the list manipulation operations with -preempt_enable/disable pairs on RT and call unconditionally into the -worker code even when the worker is blocked on a lock. The preemption -protection is safe as there is nothing which can fiddle with the list -outside of thread context. - -Reported-and_tested-by: Austin Schuh -Reported-and_tested-by: Mike Galbraith -Signed-off-by: Thomas Gleixner -Link: http://vger.kernel.org/r/alpine.DEB.2.10.1406271249510.5170@nanos -Cc: Richard Weinberger -Cc: Steven Rostedt ---- - kernel/sched/core.c | 6 +++-- - kernel/workqueue.c | 60 +++++++++++++++++++++++++++++++++++---------- - 2 files changed, 51 insertions(+), 15 deletions(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index a17c765d3fcb..cfde725e1017 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -3610,9 +3610,8 @@ void __noreturn do_task_dead(void) - - static inline void sched_submit_work(struct task_struct *tsk) - { -- if (!tsk->state || tsk_is_pi_blocked(tsk)) -+ if (!tsk->state) - return; -- - /* - * If a worker went to sleep, notify and ask workqueue whether - * it wants to wake up a task to maintain concurrency. 
-@@ -3626,6 +3625,9 @@ static inline void sched_submit_work(struct task_struct *tsk) - preempt_enable_no_resched(); - } - -+ if (tsk_is_pi_blocked(tsk)) -+ return; -+ - /* - * If we are going to sleep and we have plugged IO queued, - * make sure to submit it to avoid deadlocks. -diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 045b82ca0eb5..714d0cfe4d56 100644 ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -125,6 +125,11 @@ enum { - * cpu or grabbing pool->lock is enough for read access. If - * POOL_DISASSOCIATED is set, it's identical to L. - * -+ * On RT we need the extra protection via rt_lock_idle_list() for -+ * the list manipulations against read access from -+ * wq_worker_sleeping(). All other places are nicely serialized via -+ * pool->lock. -+ * - * A: wq_pool_attach_mutex protected. - * - * PL: wq_pool_mutex protected. -@@ -430,6 +435,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); - if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ - else - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static inline void rt_lock_idle_list(struct worker_pool *pool) -+{ -+ preempt_disable(); -+} -+static inline void rt_unlock_idle_list(struct worker_pool *pool) -+{ -+ preempt_enable(); -+} -+static inline void sched_lock_idle_list(struct worker_pool *pool) { } -+static inline void sched_unlock_idle_list(struct worker_pool *pool) { } -+#else -+static inline void rt_lock_idle_list(struct worker_pool *pool) { } -+static inline void rt_unlock_idle_list(struct worker_pool *pool) { } -+static inline void sched_lock_idle_list(struct worker_pool *pool) -+{ -+ spin_lock_irq(&pool->lock); -+} -+static inline void sched_unlock_idle_list(struct worker_pool *pool) -+{ -+ spin_unlock_irq(&pool->lock); -+} -+#endif -+ -+ - #ifdef CONFIG_DEBUG_OBJECTS_WORK - - static struct debug_obj_descr work_debug_descr; -@@ -836,10 +866,16 @@ static struct worker *first_idle_worker(struct worker_pool *pool) - */ - static void wake_up_worker(struct worker_pool *pool) - { -- struct worker *worker = first_idle_worker(pool); -+ struct worker *worker; -+ -+ rt_lock_idle_list(pool); -+ -+ worker = first_idle_worker(pool); - - if (likely(worker)) - wake_up_process(worker->task); -+ -+ rt_unlock_idle_list(pool); - } - - /** -@@ -868,7 +904,7 @@ void wq_worker_running(struct task_struct *task) - */ - void wq_worker_sleeping(struct task_struct *task) - { -- struct worker *next, *worker = kthread_data(task); -+ struct worker *worker = kthread_data(task); - struct worker_pool *pool; - - /* -@@ -885,26 +921,18 @@ void wq_worker_sleeping(struct task_struct *task) - return; - - worker->sleeping = 1; -- spin_lock_irq(&pool->lock); - - /* - * The counterpart of the following dec_and_test, implied mb, - * worklist not empty test sequence is in insert_work(). - * Please read comment there. -- * -- * NOT_RUNNING is clear. This means that we're bound to and -- * running on the local cpu w/ rq lock held and preemption -- * disabled, which in turn means that none else could be -- * manipulating idle_list, so dereferencing idle_list without pool -- * lock is safe. 
- */ - if (atomic_dec_and_test(&pool->nr_running) && - !list_empty(&pool->worklist)) { -- next = first_idle_worker(pool); -- if (next) -- wake_up_process(next->task); -+ sched_lock_idle_list(pool); -+ wake_up_worker(pool); -+ sched_unlock_idle_list(pool); - } -- spin_unlock_irq(&pool->lock); - } - - /** -@@ -1675,7 +1703,9 @@ static void worker_enter_idle(struct worker *worker) - worker->last_active = jiffies; - - /* idle_list is LIFO */ -+ rt_lock_idle_list(pool); - list_add(&worker->entry, &pool->idle_list); -+ rt_unlock_idle_list(pool); - - if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) - mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); -@@ -1708,7 +1738,9 @@ static void worker_leave_idle(struct worker *worker) - return; - worker_clr_flags(worker, WORKER_IDLE); - pool->nr_idle--; -+ rt_lock_idle_list(pool); - list_del_init(&worker->entry); -+ rt_unlock_idle_list(pool); - } - - static struct worker *alloc_worker(int node) -@@ -1876,7 +1908,9 @@ static void destroy_worker(struct worker *worker) - pool->nr_workers--; - pool->nr_idle--; - -+ rt_lock_idle_list(pool); - list_del_init(&worker->entry); -+ rt_unlock_idle_list(pool); - worker->flags |= WORKER_DIE; - wake_up_process(worker->task); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/kernel/patches-4.19.x-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch deleted file mode 100644 index 361e2642b..000000000 --- a/kernel/patches-4.19.x-rt/0261-signals-Allow-rt-tasks-to-cache-one-sigqueue-struct.patch +++ /dev/null @@ -1,212 +0,0 @@ -From e35585fe22932e9eb9cf60dc1080b05935cc99ce Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 3 Jul 2009 08:44:56 -0500 -Subject: [PATCH 261/328] signals: Allow rt tasks to cache one sigqueue struct - -To avoid allocation allow rt tasks to cache one sigqueue struct in -task struct. - -Signed-off-by: Thomas Gleixner ---- - include/linux/sched.h | 2 ++ - include/linux/signal.h | 1 + - kernel/exit.c | 2 +- - kernel/fork.c | 1 + - kernel/signal.c | 69 +++++++++++++++++++++++++++++++++++++++--- - 5 files changed, 70 insertions(+), 5 deletions(-) - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index a8ebd49c4f96..854a6cb456af 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -895,6 +895,8 @@ struct task_struct { - /* Signal handlers: */ - struct signal_struct *signal; - struct sighand_struct *sighand; -+ struct sigqueue *sigqueue_cache; -+ - sigset_t blocked; - sigset_t real_blocked; - /* Restored if set_restore_sigmask() was used: */ -diff --git a/include/linux/signal.h b/include/linux/signal.h -index 0be5ce2375cb..6495fda18c2c 100644 ---- a/include/linux/signal.h -+++ b/include/linux/signal.h -@@ -245,6 +245,7 @@ static inline void init_sigpending(struct sigpending *sig) - } - - extern void flush_sigqueue(struct sigpending *queue); -+extern void flush_task_sigqueue(struct task_struct *tsk); - - /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ - static inline int valid_signal(unsigned long sig) -diff --git a/kernel/exit.c b/kernel/exit.c -index 54c3269b8dda..c66f21193cf1 100644 ---- a/kernel/exit.c -+++ b/kernel/exit.c -@@ -160,7 +160,7 @@ static void __exit_signal(struct task_struct *tsk) - * Do this under ->siglock, we can race with another thread - * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
- */ -- flush_sigqueue(&tsk->pending); -+ flush_task_sigqueue(tsk); - tsk->sighand = NULL; - spin_unlock(&sighand->siglock); - -diff --git a/kernel/fork.c b/kernel/fork.c -index ecec0f8bef7e..234e0ca9a74b 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -1832,6 +1832,7 @@ static __latent_entropy struct task_struct *copy_process( - spin_lock_init(&p->alloc_lock); - - init_sigpending(&p->pending); -+ p->sigqueue_cache = NULL; - - p->utime = p->stime = p->gtime = 0; - #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME -diff --git a/kernel/signal.c b/kernel/signal.c -index d5e764bb2444..b3b037f63c8a 100644 ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -19,6 +19,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -397,13 +398,30 @@ void task_join_group_stop(struct task_struct *task) - } - } - -+static inline struct sigqueue *get_task_cache(struct task_struct *t) -+{ -+ struct sigqueue *q = t->sigqueue_cache; -+ -+ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) -+ return NULL; -+ return q; -+} -+ -+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) -+{ -+ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) -+ return 0; -+ return 1; -+} -+ - /* - * allocate a new signal queue record - * - this may be called without locks if and only if t == current, otherwise an - * appropriate lock must be held to stop the target task from exiting - */ - static struct sigqueue * --__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) -+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, -+ int override_rlimit, int fromslab) - { - struct sigqueue *q = NULL; - struct user_struct *user; -@@ -420,7 +438,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi - if (override_rlimit || - atomic_read(&user->sigpending) <= - task_rlimit(t, RLIMIT_SIGPENDING)) { -- q = kmem_cache_alloc(sigqueue_cachep, flags); -+ if (!fromslab) -+ q = get_task_cache(t); -+ if (!q) -+ q = kmem_cache_alloc(sigqueue_cachep, flags); - } else { - print_dropped_signal(sig); - } -@@ -437,6 +458,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi - return q; - } - -+static struct sigqueue * -+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, -+ int override_rlimit) -+{ -+ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); -+} -+ - static void __sigqueue_free(struct sigqueue *q) - { - if (q->flags & SIGQUEUE_PREALLOC) -@@ -446,6 +474,21 @@ static void __sigqueue_free(struct sigqueue *q) - kmem_cache_free(sigqueue_cachep, q); - } - -+static void sigqueue_free_current(struct sigqueue *q) -+{ -+ struct user_struct *up; -+ -+ if (q->flags & SIGQUEUE_PREALLOC) -+ return; -+ -+ up = q->user; -+ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { -+ atomic_dec(&up->sigpending); -+ free_uid(up); -+ } else -+ __sigqueue_free(q); -+} -+ - void flush_sigqueue(struct sigpending *queue) - { - struct sigqueue *q; -@@ -458,6 +501,21 @@ void flush_sigqueue(struct sigpending *queue) - } - } - -+/* -+ * Called from __exit_signal. Flush tsk->pending and -+ * tsk->sigqueue_cache -+ */ -+void flush_task_sigqueue(struct task_struct *tsk) -+{ -+ struct sigqueue *q; -+ -+ flush_sigqueue(&tsk->pending); -+ -+ q = get_task_cache(tsk); -+ if (q) -+ kmem_cache_free(sigqueue_cachep, q); -+} -+ - /* - * Flush all pending signals for this kthread. 
- */ -@@ -581,7 +639,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, - (info->si_code == SI_TIMER) && - (info->si_sys_private); - -- __sigqueue_free(first); -+ sigqueue_free_current(first); - } else { - /* - * Ok, it wasn't in the queue. This must be -@@ -618,6 +676,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) - bool resched_timer = false; - int signr; - -+ WARN_ON_ONCE(tsk != current); -+ - /* We only dequeue private signals from ourselves, we don't let - * signalfd steal them - */ -@@ -1756,7 +1816,8 @@ EXPORT_SYMBOL(kill_pid); - */ - struct sigqueue *sigqueue_alloc(void) - { -- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); -+ /* Preallocated sigqueue objects always from the slabcache ! */ -+ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); - - if (q) - q->flags |= SIGQUEUE_PREALLOC; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0262-Add-localversion-for-RT-release.patch b/kernel/patches-4.19.x-rt/0262-Add-localversion-for-RT-release.patch deleted file mode 100644 index d61aab33a..000000000 --- a/kernel/patches-4.19.x-rt/0262-Add-localversion-for-RT-release.patch +++ /dev/null @@ -1,21 +0,0 @@ -From ae23509557da3f7ee876249340937db31d277915 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Fri, 8 Jul 2011 20:25:16 +0200 -Subject: [PATCH 262/328] Add localversion for -RT release - -Signed-off-by: Thomas Gleixner ---- - localversion-rt | 1 + - 1 file changed, 1 insertion(+) - create mode 100644 localversion-rt - -diff --git a/localversion-rt b/localversion-rt -new file mode 100644 -index 000000000000..1199ebade17b ---- /dev/null -+++ b/localversion-rt -@@ -0,0 +1 @@ -+-rt16 --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch b/kernel/patches-4.19.x-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch deleted file mode 100644 index d87500536..000000000 --- a/kernel/patches-4.19.x-rt/0263-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch +++ /dev/null @@ -1,96 +0,0 @@ -From b84964140e4a5f15b5710e2ddd99a6560411542d Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 26 Mar 2019 18:31:54 +0100 -Subject: [PATCH 263/328] powerpc/pseries/iommu: Use a locallock instead - local_irq_save() - -The locallock protects the per-CPU variable tce_page. The function -attempts to allocate memory while tce_page is protected (by disabling -interrupts). - -Use local_irq_save() instead of local_irq_disable(). 
- -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - arch/powerpc/platforms/pseries/iommu.c | 16 ++++++++++------ - 1 file changed, 10 insertions(+), 6 deletions(-) - -diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c -index b1a08cb760e0..8ef818ad83ec 100644 ---- a/arch/powerpc/platforms/pseries/iommu.c -+++ b/arch/powerpc/platforms/pseries/iommu.c -@@ -38,6 +38,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -212,6 +213,7 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift, - } - - static DEFINE_PER_CPU(__be64 *, tce_page); -+static DEFINE_LOCAL_IRQ_LOCK(tcp_page_lock); - - static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, - long npages, unsigned long uaddr, -@@ -233,7 +235,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, - direction, attrs); - } - -- local_irq_save(flags); /* to protect tcep and the page behind it */ -+ /* to protect tcep and the page behind it */ -+ local_lock_irqsave(tcp_page_lock, flags); - - tcep = __this_cpu_read(tce_page); - -@@ -244,7 +247,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, - tcep = (__be64 *)__get_free_page(GFP_ATOMIC); - /* If allocation fails, fall back to the loop implementation */ - if (!tcep) { -- local_irq_restore(flags); -+ local_unlock_irqrestore(tcp_page_lock, flags); - return tce_build_pSeriesLP(tbl->it_index, tcenum, - tbl->it_page_shift, - npages, uaddr, direction, attrs); -@@ -279,7 +282,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, - tcenum += limit; - } while (npages > 0 && !rc); - -- local_irq_restore(flags); -+ local_unlock_irqrestore(tcp_page_lock, flags); - - if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) { - ret = (int)rc; -@@ -450,13 +453,14 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, - DMA_BIDIRECTIONAL, 0); - } - -- local_irq_disable(); /* to protect tcep and the page behind it */ -+ /* to protect tcep and the page behind it */ -+ local_lock_irq(tcp_page_lock); - tcep = __this_cpu_read(tce_page); - - if (!tcep) { - tcep = (__be64 *)__get_free_page(GFP_ATOMIC); - if (!tcep) { -- local_irq_enable(); -+ local_unlock_irq(tcp_page_lock); - return -ENOMEM; - } - __this_cpu_write(tce_page, tcep); -@@ -502,7 +506,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, - - /* error cleanup: caller will clear whole range */ - -- local_irq_enable(); -+ local_unlock_irq(tcp_page_lock); - return rc; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0264-powerpc-reshuffle-TIF-bits.patch b/kernel/patches-4.19.x-rt/0264-powerpc-reshuffle-TIF-bits.patch deleted file mode 100644 index 1becf6a00..000000000 --- a/kernel/patches-4.19.x-rt/0264-powerpc-reshuffle-TIF-bits.patch +++ /dev/null @@ -1,151 +0,0 @@ -From a42bc60446357a00c18c446a7600b5b7fd090069 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 22 Mar 2019 17:15:58 +0100 -Subject: [PATCH 264/328] powerpc: reshuffle TIF bits - -Powerpc32/64 does not compile because TIF_SYSCALL_TRACE's bit is higher -than 15 and the assembly instructions don't expect that. - -Move TIF_RESTOREALL, TIF_NOERROR to the higher bits and keep -TIF_NEED_RESCHED_LAZY in the lower range. As a result one split load is -needed and otherwise we can use immediates. 
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - arch/powerpc/include/asm/thread_info.h | 11 +++++++---- - arch/powerpc/kernel/entry_32.S | 12 +++++++----- - arch/powerpc/kernel/entry_64.S | 12 +++++++----- - 3 files changed, 21 insertions(+), 14 deletions(-) - -diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h -index ce316076bc52..64c3d1a720e2 100644 ---- a/arch/powerpc/include/asm/thread_info.h -+++ b/arch/powerpc/include/asm/thread_info.h -@@ -83,18 +83,18 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src - #define TIF_SIGPENDING 1 /* signal pending */ - #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ - #define TIF_FSCHECK 3 /* Check FS is USER_DS on return */ --#define TIF_NEED_RESCHED_LAZY 4 /* lazy rescheduling necessary */ - #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ - #define TIF_PATCH_PENDING 6 /* pending live patching update */ - #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ - #define TIF_SINGLESTEP 8 /* singlestepping active */ - #define TIF_NOHZ 9 /* in adaptive nohz mode */ - #define TIF_SECCOMP 10 /* secure computing */ --#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ --#define TIF_NOERROR 12 /* Force successful syscall return */ -+ -+#define TIF_NEED_RESCHED_LAZY 11 /* lazy rescheduling necessary */ -+#define TIF_SYSCALL_TRACEPOINT 12 /* syscall tracepoint instrumentation */ -+ - #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ - #define TIF_UPROBE 14 /* breakpointed or single-stepping */ --#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ - #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation - for stack store? */ - #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ -@@ -103,6 +103,9 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src - #endif - #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */ - #define TIF_32BIT 20 /* 32 bit binary */ -+#define TIF_RESTOREALL 21 /* Restore all regs (implies NOERROR) */ -+#define TIF_NOERROR 22 /* Force successful syscall return */ -+ - - /* as above, but as bit values */ - #define _TIF_SYSCALL_TRACE (1< -Date: Wed, 13 Mar 2019 11:40:34 +0000 -Subject: [PATCH 265/328] tty/sysrq: Convert show_lock to raw_spinlock_t - -Systems which don't provide arch_trigger_cpumask_backtrace() will -invoke showacpu() from a smp_call_function() function which is invoked -with disabled interrupts even on -RT systems. - -The function acquires the show_lock lock which only purpose is to -ensure that the CPUs don't print simultaneously. Otherwise the -output would clash and it would be hard to tell the output from CPUx -apart from CPUy. - -On -RT the spin_lock() can not be acquired from this context. A -raw_spin_lock() is required. It will introduce the system's latency -by performing the sysrq request and other CPUs will block on the lock -until the request is done. This is okay because the user asked for a -backtrace of all active CPUs and under "normal circumstances in -production" this path should not be triggered. 
- -Signed-off-by: Julien Grall -Signed-off-by: Steven Rostedt (VMware) -[bigeasy@linuxtronix.de: commit description] -Signed-off-by: Sebastian Andrzej Siewior -Acked-by: Sebastian Andrzej Siewior -Signed-off-by: Greg Kroah-Hartman -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/tty/sysrq.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c -index 06ed20dd01ba..627517ad55bf 100644 ---- a/drivers/tty/sysrq.c -+++ b/drivers/tty/sysrq.c -@@ -215,7 +215,7 @@ static struct sysrq_key_op sysrq_showlocks_op = { - #endif - - #ifdef CONFIG_SMP --static DEFINE_SPINLOCK(show_lock); -+static DEFINE_RAW_SPINLOCK(show_lock); - - static void showacpu(void *dummy) - { -@@ -225,10 +225,10 @@ static void showacpu(void *dummy) - if (idle_cpu(smp_processor_id())) - return; - -- spin_lock_irqsave(&show_lock, flags); -+ raw_spin_lock_irqsave(&show_lock, flags); - pr_info("CPU%d:\n", smp_processor_id()); - show_stack(NULL, NULL); -- spin_unlock_irqrestore(&show_lock, flags); -+ raw_spin_unlock_irqrestore(&show_lock, flags); - } - - static void sysrq_showregs_othercpus(struct work_struct *dummy) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch b/kernel/patches-4.19.x-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch deleted file mode 100644 index 6cbccedb3..000000000 --- a/kernel/patches-4.19.x-rt/0266-drm-i915-Don-t-disable-interrupts-independently-of-t.patch +++ /dev/null @@ -1,50 +0,0 @@ -From ed7f01fba8a5fbc0d32f5bab0fa3fca250530999 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 10 Apr 2019 11:01:37 +0200 -Subject: [PATCH 266/328] drm/i915: Don't disable interrupts independently of - the lock - -The locks (timeline->lock and rq->lock) need to be taken with disabled -interrupts. This is done in __retire_engine_request() by disabling the -interrupts independently of the locks itself. -While local_irq_disable()+spin_lock() equals spin_lock_irq() on vanilla -it does not on RT. Also, it is not obvious if there is a special reason -to why the interrupts are disabled independently of the lock. - -Enable/disable interrupts as part of the locking instruction. 
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - drivers/gpu/drm/i915/i915_request.c | 8 ++------ - 1 file changed, 2 insertions(+), 6 deletions(-) - -diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c -index 5c2c93cbab12..7124510b9131 100644 ---- a/drivers/gpu/drm/i915/i915_request.c -+++ b/drivers/gpu/drm/i915/i915_request.c -@@ -356,9 +356,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine, - - GEM_BUG_ON(!i915_request_completed(rq)); - -- local_irq_disable(); -- -- spin_lock(&engine->timeline.lock); -+ spin_lock_irq(&engine->timeline.lock); - GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests)); - list_del_init(&rq->link); - spin_unlock(&engine->timeline.lock); -@@ -372,9 +370,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine, - GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); - atomic_dec(&rq->i915->gt_pm.rps.num_waiters); - } -- spin_unlock(&rq->lock); -- -- local_irq_enable(); -+ spin_unlock_irq(&rq->lock); - - /* - * The backing object for the context is done after switching to the --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch b/kernel/patches-4.19.x-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch deleted file mode 100644 index 8efca1790..000000000 --- a/kernel/patches-4.19.x-rt/0267-sched-completion-Fix-a-lockup-in-wait_for_completion.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 62aaf2e45d6bc981cec8335fd58bfe5216343cce Mon Sep 17 00:00:00 2001 -From: Corey Minyard -Date: Thu, 9 May 2019 14:33:20 -0500 -Subject: [PATCH 267/328] sched/completion: Fix a lockup in - wait_for_completion() - -Consider following race: - - T0 T1 T2 - wait_for_completion() - do_wait_for_common() - __prepare_to_swait() - schedule() - complete() - x->done++ (0 -> 1) - raw_spin_lock_irqsave() - swake_up_locked() wait_for_completion() - wake_up_process(T0) - list_del_init() - raw_spin_unlock_irqrestore() - raw_spin_lock_irq(&x->wait.lock) - raw_spin_lock_irq(&x->wait.lock) x->done != UINT_MAX, 1 -> 0 - raw_spin_unlock_irq(&x->wait.lock) - return 1 - while (!x->done && timeout), - continue loop, not enqueued - on &x->wait - -Basically, the problem is that the original wait queues used in -completions did not remove the item from the queue in the wakeup -function, but swake_up_locked() does. - -Fix it by adding the thread to the wait queue inside the do loop. -The design of swait detects if it is already in the list and doesn't -do the list add again. 
- -Cc: stable-rt@vger.kernel.org -Fixes: a04ff6b4ec4ee7e ("completion: Use simple wait queues") -Signed-off-by: Corey Minyard -Acked-by: Steven Rostedt (VMware) -Signed-off-by: Steven Rostedt (VMware) -[bigeasy: shorten commit message ] -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/sched/completion.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c -index 755a58084978..49c14137988e 100644 ---- a/kernel/sched/completion.c -+++ b/kernel/sched/completion.c -@@ -72,12 +72,12 @@ do_wait_for_common(struct completion *x, - if (!x->done) { - DECLARE_SWAITQUEUE(wait); - -- __prepare_to_swait(&x->wait, &wait); - do { - if (signal_pending_state(state, current)) { - timeout = -ERESTARTSYS; - break; - } -+ __prepare_to_swait(&x->wait, &wait); - __set_current_state(state); - raw_spin_unlock_irq(&x->wait.lock); - timeout = action(timeout); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0268-kthread-add-a-global-worker-thread.patch b/kernel/patches-4.19.x-rt/0268-kthread-add-a-global-worker-thread.patch deleted file mode 100644 index a0053e1ef..000000000 --- a/kernel/patches-4.19.x-rt/0268-kthread-add-a-global-worker-thread.patch +++ /dev/null @@ -1,179 +0,0 @@ -From e63273e590c9d2a348ac876a3e0a86fe0053be88 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 12 Feb 2019 15:09:38 +0100 -Subject: [PATCH 268/328] kthread: add a global worker thread. - -[ Upstream commit 0532e87d9d44795221aa921ba7024bde689cc894 ] - -Add kthread_schedule_work() which uses a global kthread for all its -jobs. -Split the cgroup include to avoid recussive includes from interrupt.h. -Fixup everything that fails to build (and did not include all header). - -Signed-off-by: Sebastian Andrzej Siewior -[ - Fixed up include in blk-cgroup.h reported by Juri Lelli - http://lkml.kernel.org/r/20190722083009.GE25636@localhost.localdomain -] -Signed-off-by: Steven Rostedt (VMware) ---- - drivers/block/loop.c | 2 +- - drivers/spi/spi-rockchip.c | 1 + - include/linux/blk-cgroup.h | 2 +- - include/linux/kthread-cgroup.h | 17 +++++++++++++++++ - include/linux/kthread.h | 17 +++++++---------- - init/main.c | 1 + - kernel/kthread.c | 14 ++++++++++++++ - 7 files changed, 42 insertions(+), 12 deletions(-) - create mode 100644 include/linux/kthread-cgroup.h - -diff --git a/drivers/block/loop.c b/drivers/block/loop.c -index 9cd231a27328..351ea22ffb56 100644 ---- a/drivers/block/loop.c -+++ b/drivers/block/loop.c -@@ -70,7 +70,7 @@ - #include - #include - #include --#include -+#include - #include - #include - #include -diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c -index 185bbdce62b1..63b10236eb05 100644 ---- a/drivers/spi/spi-rockchip.c -+++ b/drivers/spi/spi-rockchip.c -@@ -22,6 +22,7 @@ - #include - #include - #include -+#include - - #define DRIVER_NAME "rockchip-spi" - -diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h -index 6d766a19f2bb..0473efda4c65 100644 ---- a/include/linux/blk-cgroup.h -+++ b/include/linux/blk-cgroup.h -@@ -14,7 +14,7 @@ - * Nauman Rafique - */ - --#include -+#include - #include - #include - #include -diff --git a/include/linux/kthread-cgroup.h b/include/linux/kthread-cgroup.h -new file mode 100644 -index 000000000000..53d34bca9d72 ---- /dev/null -+++ b/include/linux/kthread-cgroup.h -@@ -0,0 +1,17 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+#ifndef _LINUX_KTHREAD_CGROUP_H -+#define _LINUX_KTHREAD_CGROUP_H -+#include -+#include -+ -+#ifdef CONFIG_BLK_CGROUP -+void 
kthread_associate_blkcg(struct cgroup_subsys_state *css); -+struct cgroup_subsys_state *kthread_blkcg(void); -+#else -+static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { } -+static inline struct cgroup_subsys_state *kthread_blkcg(void) -+{ -+ return NULL; -+} -+#endif -+#endif -diff --git a/include/linux/kthread.h b/include/linux/kthread.h -index ad292898f7f2..7cf56eb54103 100644 ---- a/include/linux/kthread.h -+++ b/include/linux/kthread.h -@@ -4,7 +4,6 @@ - /* Simple interface for creating and stopping kernel threads without mess. */ - #include - #include --#include - - __printf(4, 5) - struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), -@@ -106,7 +105,7 @@ struct kthread_delayed_work { - }; - - #define KTHREAD_WORKER_INIT(worker) { \ -- .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ -+ .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \ - .work_list = LIST_HEAD_INIT((worker).work_list), \ - .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ - } -@@ -198,14 +197,12 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); - - void kthread_destroy_worker(struct kthread_worker *worker); - --#ifdef CONFIG_BLK_CGROUP --void kthread_associate_blkcg(struct cgroup_subsys_state *css); --struct cgroup_subsys_state *kthread_blkcg(void); --#else --static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { } --static inline struct cgroup_subsys_state *kthread_blkcg(void) -+extern struct kthread_worker kthread_global_worker; -+void kthread_init_global_worker(void); -+ -+static inline bool kthread_schedule_work(struct kthread_work *work) - { -- return NULL; -+ return kthread_queue_work(&kthread_global_worker, work); - } --#endif -+ - #endif /* _LINUX_KTHREAD_H */ -diff --git a/init/main.c b/init/main.c -index 6e02188386a7..e514dd93de76 100644 ---- a/init/main.c -+++ b/init/main.c -@@ -1129,6 +1129,7 @@ static noinline void __init kernel_init_freeable(void) - smp_prepare_cpus(setup_max_cpus); - - workqueue_init(); -+ kthread_init_global_worker(); - - init_mm_internals(); - -diff --git a/kernel/kthread.c b/kernel/kthread.c -index 5641b55783a6..9db017761a1f 100644 ---- a/kernel/kthread.c -+++ b/kernel/kthread.c -@@ -20,6 +20,7 @@ - #include - #include - #include -+#include - #include - - static DEFINE_SPINLOCK(kthread_create_lock); -@@ -1180,6 +1181,19 @@ void kthread_destroy_worker(struct kthread_worker *worker) - } - EXPORT_SYMBOL(kthread_destroy_worker); - -+DEFINE_KTHREAD_WORKER(kthread_global_worker); -+EXPORT_SYMBOL(kthread_global_worker); -+ -+__init void kthread_init_global_worker(void) -+{ -+ kthread_global_worker.task = kthread_create(kthread_worker_fn, -+ &kthread_global_worker, -+ "kswork"); -+ if (WARN_ON(IS_ERR(kthread_global_worker.task))) -+ return; -+ wake_up_process(kthread_global_worker.task); -+} -+ - #ifdef CONFIG_BLK_CGROUP - /** - * kthread_associate_blkcg - associate blkcg to current kthread --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch b/kernel/patches-4.19.x-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch deleted file mode 100644 index 6bb75889c..000000000 --- a/kernel/patches-4.19.x-rt/0269-genirq-Do-not-invoke-the-affinity-callback-via-a-wor.patch +++ /dev/null @@ -1,100 +0,0 @@ -From ea09ce2ccce72c0a8abbd8c5971aee290b24b222 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 21 Aug 2013 17:48:46 +0200 -Subject: [PATCH 269/328] genirq: Do not invoke the affinity callback 
via a - workqueue on RT - -[ Upstream commit 2122adbe011cdc0eb62ad62494e181005b23c76a ] - -Joe Korty reported, that __irq_set_affinity_locked() schedules a -workqueue while holding a rawlock which results in a might_sleep() -warning. -This patch uses swork_queue() instead. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - include/linux/interrupt.h | 5 ++--- - kernel/irq/manage.c | 19 ++++--------------- - 2 files changed, 6 insertions(+), 18 deletions(-) - -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index 72333899f043..a9321f6429f2 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -13,7 +13,7 @@ - #include - #include - #include --#include -+#include - - #include - #include -@@ -228,7 +228,6 @@ extern void resume_device_irqs(void); - * struct irq_affinity_notify - context for notification of IRQ affinity changes - * @irq: Interrupt to which notification applies - * @kref: Reference count, for internal use -- * @swork: Swork item, for internal use - * @work: Work item, for internal use - * @notify: Function to be called on change. This will be - * called in process context. -@@ -241,7 +240,7 @@ struct irq_affinity_notify { - unsigned int irq; - struct kref kref; - #ifdef CONFIG_PREEMPT_RT_BASE -- struct swork_event swork; -+ struct kthread_work work; - #else - struct work_struct work; - #endif -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index ce86341a9e19..d5539e04e00a 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -287,7 +287,7 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, - kref_get(&desc->affinity_notify->kref); - - #ifdef CONFIG_PREEMPT_RT_BASE -- swork_queue(&desc->affinity_notify->swork); -+ kthread_schedule_work(&desc->affinity_notify->work); - #else - schedule_work(&desc->affinity_notify->work); - #endif -@@ -352,21 +352,11 @@ static void _irq_affinity_notify(struct irq_affinity_notify *notify) - } - - #ifdef CONFIG_PREEMPT_RT_BASE --static void init_helper_thread(void) --{ -- static int init_sworker_once; -- -- if (init_sworker_once) -- return; -- if (WARN_ON(swork_get())) -- return; -- init_sworker_once = 1; --} - --static void irq_affinity_notify(struct swork_event *swork) -+static void irq_affinity_notify(struct kthread_work *work) - { - struct irq_affinity_notify *notify = -- container_of(swork, struct irq_affinity_notify, swork); -+ container_of(work, struct irq_affinity_notify, work); - _irq_affinity_notify(notify); - } - -@@ -409,8 +399,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) - notify->irq = irq; - kref_init(¬ify->kref); - #ifdef CONFIG_PREEMPT_RT_BASE -- INIT_SWORK(¬ify->swork, irq_affinity_notify); -- init_helper_thread(); -+ kthread_init_work(¬ify->work, irq_affinity_notify); - #else - INIT_WORK(¬ify->work, irq_affinity_notify); - #endif --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch b/kernel/patches-4.19.x-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch deleted file mode 100644 index d43786d84..000000000 --- a/kernel/patches-4.19.x-rt/0270-genirq-Handle-missing-work_struct-in-irq_set_affinit.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 2c123db44d2e5daa7c6012fa43715d99706c84a5 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 28 May 2019 10:42:15 +0200 -Subject: [PATCH 270/328] genirq: Handle missing work_struct in - irq_set_affinity_notifier() - -[ Upstream commit 
bbc4d2a7d6ff54ba923640d9a42c7bef7185fe98 ] - -The backported stable commit - 59c39840f5abf ("genirq: Prevent use-after-free and work list corruption") - -added cancel_work_sync() on a work_struct element which is not available -in RT. - -Replace cancel_work_sync() with kthread_cancel_work_sync() on RT. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/irq/manage.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index d5539e04e00a..290cd520dba1 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -411,8 +411,9 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) - raw_spin_unlock_irqrestore(&desc->lock, flags); - - if (old_notify) { --#ifndef CONFIG_PREEMPT_RT_BASE -- /* Need to address this for PREEMPT_RT */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ kthread_cancel_work_sync(¬ify->work); -+#else - cancel_work_sync(&old_notify->work); - #endif - kref_put(&old_notify->kref, old_notify->release); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch b/kernel/patches-4.19.x-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch deleted file mode 100644 index 72ee5b6ab..000000000 --- a/kernel/patches-4.19.x-rt/0271-arm-imx6-cpuidle-Use-raw_spinlock_t.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 0db2daea3de433d6d516bde0bd855fc8be716a8c Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 14 May 2019 17:07:44 +0200 -Subject: [PATCH 271/328] arm: imx6: cpuidle: Use raw_spinlock_t - -[ Upstream commit 40d0332ec8312e9c090f0a5414d9c90e12b13611 ] - -The idle call back is invoked with disabled interrupts and requires -raw_spinlock_t locks to work. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - arch/arm/mach-imx/cpuidle-imx6q.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - -diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c -index 326e870d7123..d9ac80aa1eb0 100644 ---- a/arch/arm/mach-imx/cpuidle-imx6q.c -+++ b/arch/arm/mach-imx/cpuidle-imx6q.c -@@ -17,22 +17,22 @@ - #include "hardware.h" - - static int num_idle_cpus = 0; --static DEFINE_SPINLOCK(cpuidle_lock); -+static DEFINE_RAW_SPINLOCK(cpuidle_lock); - - static int imx6q_enter_wait(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) - { -- spin_lock(&cpuidle_lock); -+ raw_spin_lock(&cpuidle_lock); - if (++num_idle_cpus == num_online_cpus()) - imx6_set_lpm(WAIT_UNCLOCKED); -- spin_unlock(&cpuidle_lock); -+ raw_spin_unlock(&cpuidle_lock); - - cpu_do_idle(); - -- spin_lock(&cpuidle_lock); -+ raw_spin_lock(&cpuidle_lock); - if (num_idle_cpus-- == num_online_cpus()) - imx6_set_lpm(WAIT_CLOCKED); -- spin_unlock(&cpuidle_lock); -+ raw_spin_unlock(&cpuidle_lock); - - return index; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch b/kernel/patches-4.19.x-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch deleted file mode 100644 index 44845ad98..000000000 --- a/kernel/patches-4.19.x-rt/0272-rcu-Don-t-allow-to-change-rcu_normal_after_boot-on-R.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 63e76d2c0af10f033768782de397bb94afd9c20b Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 24 Jun 2019 18:29:13 +0200 -Subject: [PATCH 272/328] rcu: Don't allow to change rcu_normal_after_boot on - RT - -[ Upstream commit 
c6c058c10577815a2491ce661876cff00a4c3b15 ] - -On RT rcu_normal_after_boot is enabled by default. -Don't allow to disable it on RT because the "expedited rcu" would -introduce latency spikes. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/rcu/update.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c -index 16d8dba23329..ed75addd3ccd 100644 ---- a/kernel/rcu/update.c -+++ b/kernel/rcu/update.c -@@ -69,7 +69,9 @@ module_param(rcu_expedited, int, 0); - extern int rcu_normal; /* from sysctl */ - module_param(rcu_normal, int, 0); - static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); -+#ifndef CONFIG_PREEMPT_RT_FULL - module_param(rcu_normal_after_boot, int, 0); -+#endif - #endif /* #ifndef CONFIG_TINY_RCU */ - - #ifdef CONFIG_DEBUG_LOCK_ALLOC --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch b/kernel/patches-4.19.x-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch deleted file mode 100644 index 0fd7ddeea..000000000 --- a/kernel/patches-4.19.x-rt/0273-pci-switchtec-fix-stream_open.cocci-warnings.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 57434065eaa488e6dc4ab73d2e0915a3e6bc3992 Mon Sep 17 00:00:00 2001 -From: kbuild test robot -Date: Sat, 13 Apr 2019 11:22:51 +0800 -Subject: [PATCH 273/328] pci/switchtec: fix stream_open.cocci warnings - -[ Upstream commit 9462c69e29307adc95c289f50839d5d683973891 ] - -drivers/pci/switch/switchtec.c:395:1-17: ERROR: switchtec_fops: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix. - -Generated by: scripts/coccinelle/api/stream_open.cocci - -Cc: Kirill Smelkov -Cc: Julia Lawall -Fixes: 8a29a3bae2a2 ("pci/switchtec: Don't use completion's wait queue") -Cc: stable-rt@vger.kernel.org # where it applies to -Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1904131849350.2536@hadrien -Signed-off-by: kbuild test robot -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - drivers/pci/switch/switchtec.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c -index a8df847bedee..25d1f96cddc1 100644 ---- a/drivers/pci/switch/switchtec.c -+++ b/drivers/pci/switch/switchtec.c -@@ -356,7 +356,7 @@ static int switchtec_dev_open(struct inode *inode, struct file *filp) - return PTR_ERR(stuser); - - filp->private_data = stuser; -- nonseekable_open(inode, filp); -+ stream_open(inode, filp); - - dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch b/kernel/patches-4.19.x-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch deleted file mode 100644 index adc7f7378..000000000 --- a/kernel/patches-4.19.x-rt/0274-sched-core-Drop-a-preempt_disable_rt-statement.patch +++ /dev/null @@ -1,49 +0,0 @@ -From c373cd893601a19b54d0926c87daa939d6953f47 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 24 Jun 2019 19:33:16 +0200 -Subject: [PATCH 274/328] sched/core: Drop a preempt_disable_rt() statement - -[ Upstream commit 761126efdcbe3fa3e99c9079fa0ad6eca2f251f2 ] - -The caller holds a lock which already disables preemption. -Drop the preempt_disable_rt() statement in get_nohz_timer_target(). 
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/sched/core.c | 9 ++------- - 1 file changed, 2 insertions(+), 7 deletions(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index cfde725e1017..678c2c4de4f5 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -558,14 +558,11 @@ void resched_cpu(int cpu) - */ - int get_nohz_timer_target(void) - { -- int i, cpu; -+ int i, cpu = smp_processor_id(); - struct sched_domain *sd; - -- preempt_disable_rt(); -- cpu = smp_processor_id(); -- - if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER)) -- goto preempt_en_rt; -+ return cpu; - - rcu_read_lock(); - for_each_domain(cpu, sd) { -@@ -584,8 +581,6 @@ int get_nohz_timer_target(void) - cpu = housekeeping_any_cpu(HK_FLAG_TIMER); - unlock: - rcu_read_unlock(); --preempt_en_rt: -- preempt_enable_rt(); - return cpu; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch b/kernel/patches-4.19.x-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch deleted file mode 100644 index 656572f32..000000000 --- a/kernel/patches-4.19.x-rt/0275-timers-Redo-the-notification-of-canceling-timers-on-.patch +++ /dev/null @@ -1,649 +0,0 @@ -From b624498d8a6db63e6d6390bbd5f560f37929a500 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 24 Jun 2019 19:39:06 +0200 -Subject: [PATCH 275/328] timers: Redo the notification of canceling timers on - -RT - -[ Upstream commit c71273154c2ad12e13333aada340ff30e826a11b ] - -Rework of the hrtimer, timer and posix-timer cancelation interface -on -RT. Instead of the swait/schedule interface we now have locks -which are taken while timer is active. During the cancellation of an -active timer the lock is acquired. The lock will then either -PI-boost the timer or block and wait until the timer completed. -The new code looks simpler and does not trigger a warning from -rcu_note_context_switch() anymore like reported by Grygorii Strashko -and Daniel Wagner. -The patches were contributed by Anna-Maria Gleixner. 
- -This is an all in one commit of the following patches: -| [PATCH] timers: Introduce expiry spin lock -| [PATCH] timers: Drop expiry lock after each timer invocation -| [PATCH] hrtimer: Introduce expiry spin lock -| [PATCH] posix-timers: move rcu out of union -| [PATCH] posix-timers: Add expiry lock - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - fs/timerfd.c | 5 +- - include/linux/hrtimer.h | 17 ++---- - include/linux/posix-timers.h | 1 + - kernel/time/alarmtimer.c | 2 +- - kernel/time/hrtimer.c | 36 ++++--------- - kernel/time/itimer.c | 2 +- - kernel/time/posix-cpu-timers.c | 23 ++++++++ - kernel/time/posix-timers.c | 69 ++++++++++-------------- - kernel/time/posix-timers.h | 2 + - kernel/time/timer.c | 96 ++++++++++++++++------------------ - 10 files changed, 118 insertions(+), 135 deletions(-) - -diff --git a/fs/timerfd.c b/fs/timerfd.c -index 82d0f52414a6..f845093466be 100644 ---- a/fs/timerfd.c -+++ b/fs/timerfd.c -@@ -471,10 +471,11 @@ static int do_timerfd_settime(int ufd, int flags, - break; - } - spin_unlock_irq(&ctx->wqh.lock); -+ - if (isalarm(ctx)) -- hrtimer_wait_for_timer(&ctx->t.alarm.timer); -+ hrtimer_grab_expiry_lock(&ctx->t.alarm.timer); - else -- hrtimer_wait_for_timer(&ctx->t.tmr); -+ hrtimer_grab_expiry_lock(&ctx->t.tmr); - } - - /* -diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h -index 082147c07831..aee31b1f0cc3 100644 ---- a/include/linux/hrtimer.h -+++ b/include/linux/hrtimer.h -@@ -22,7 +22,6 @@ - #include - #include - #include --#include - - struct hrtimer_clock_base; - struct hrtimer_cpu_base; -@@ -193,6 +192,8 @@ enum hrtimer_base_type { - * @nr_retries: Total number of hrtimer interrupt retries - * @nr_hangs: Total number of hrtimer interrupt hangs - * @max_hang_time: Maximum time spent in hrtimer_interrupt -+ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are -+ * expired - * @expires_next: absolute time of the next event, is required for remote - * hrtimer enqueue; it is the total first expiry time (hard - * and soft hrtimer are taken into account) -@@ -220,12 +221,10 @@ struct hrtimer_cpu_base { - unsigned short nr_hangs; - unsigned int max_hang_time; - #endif -+ spinlock_t softirq_expiry_lock; - ktime_t expires_next; - struct hrtimer *next_timer; - ktime_t softirq_expires_next; --#ifdef CONFIG_PREEMPT_RT_BASE -- wait_queue_head_t wait; --#endif - struct hrtimer *softirq_next_timer; - struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; - } ____cacheline_aligned; -@@ -426,6 +425,7 @@ static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, - - extern int hrtimer_cancel(struct hrtimer *timer); - extern int hrtimer_try_to_cancel(struct hrtimer *timer); -+extern void hrtimer_grab_expiry_lock(const struct hrtimer *timer); - - static inline void hrtimer_start_expires(struct hrtimer *timer, - enum hrtimer_mode mode) -@@ -443,13 +443,6 @@ static inline void hrtimer_restart(struct hrtimer *timer) - hrtimer_start_expires(timer, HRTIMER_MODE_ABS); - } - --/* Softirq preemption could deadlock timer removal */ --#ifdef CONFIG_PREEMPT_RT_BASE -- extern void hrtimer_wait_for_timer(const struct hrtimer *timer); --#else --# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) --#endif -- - /* Query timers: */ - extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); - -@@ -481,7 +474,7 @@ static inline bool hrtimer_is_queued(struct hrtimer *timer) - * Helper function to check, whether the timer is running the callback - * 
function - */ --static inline int hrtimer_callback_running(const struct hrtimer *timer) -+static inline int hrtimer_callback_running(struct hrtimer *timer) - { - return timer->base->running == timer; - } -diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h -index 0571b498db73..3e6c91bdf2ef 100644 ---- a/include/linux/posix-timers.h -+++ b/include/linux/posix-timers.h -@@ -15,6 +15,7 @@ struct cpu_timer_list { - u64 expires, incr; - struct task_struct *task; - int firing; -+ int firing_cpu; - }; - - /* -diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c -index a465564367ec..dcf0204264f1 100644 ---- a/kernel/time/alarmtimer.c -+++ b/kernel/time/alarmtimer.c -@@ -438,7 +438,7 @@ int alarm_cancel(struct alarm *alarm) - int ret = alarm_try_to_cancel(alarm); - if (ret >= 0) - return ret; -- hrtimer_wait_for_timer(&alarm->timer); -+ hrtimer_grab_expiry_lock(&alarm->timer); - } - } - EXPORT_SYMBOL_GPL(alarm_cancel); -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index f16cbc98c47a..ed5d8d51ca91 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -963,33 +963,16 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) - } - EXPORT_SYMBOL_GPL(hrtimer_forward); - --#ifdef CONFIG_PREEMPT_RT_BASE --# define wake_up_timer_waiters(b) wake_up(&(b)->wait) -- --/** -- * hrtimer_wait_for_timer - Wait for a running timer -- * -- * @timer: timer to wait for -- * -- * The function waits in case the timers callback function is -- * currently executed on the waitqueue of the timer base. The -- * waitqueue is woken up after the timer callback function has -- * finished execution. -- */ --void hrtimer_wait_for_timer(const struct hrtimer *timer) -+void hrtimer_grab_expiry_lock(const struct hrtimer *timer) - { - struct hrtimer_clock_base *base = timer->base; - -- if (base && base->cpu_base && -- base->index >= HRTIMER_BASE_MONOTONIC_SOFT) -- wait_event(base->cpu_base->wait, -- !(hrtimer_callback_running(timer))); -+ if (base && base->cpu_base) { -+ spin_lock(&base->cpu_base->softirq_expiry_lock); -+ spin_unlock(&base->cpu_base->softirq_expiry_lock); -+ } - } - --#else --# define wake_up_timer_waiters(b) do { } while (0) --#endif -- - /* - * enqueue_hrtimer - internal function to (re)start a timer - * -@@ -1227,7 +1210,7 @@ int hrtimer_cancel(struct hrtimer *timer) - - if (ret >= 0) - return ret; -- hrtimer_wait_for_timer(timer); -+ hrtimer_grab_expiry_lock(timer); - } - } - EXPORT_SYMBOL_GPL(hrtimer_cancel); -@@ -1531,6 +1514,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h) - unsigned long flags; - ktime_t now; - -+ spin_lock(&cpu_base->softirq_expiry_lock); - raw_spin_lock_irqsave(&cpu_base->lock, flags); - - now = hrtimer_update_base(cpu_base); -@@ -1540,7 +1524,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h) - hrtimer_update_softirq_timer(cpu_base, true); - - raw_spin_unlock_irqrestore(&cpu_base->lock, flags); -- wake_up_timer_waiters(cpu_base); -+ spin_unlock(&cpu_base->softirq_expiry_lock); - } - - #ifdef CONFIG_HIGH_RES_TIMERS -@@ -1950,9 +1934,7 @@ int hrtimers_prepare_cpu(unsigned int cpu) - cpu_base->softirq_next_timer = NULL; - cpu_base->expires_next = KTIME_MAX; - cpu_base->softirq_expires_next = KTIME_MAX; --#ifdef CONFIG_PREEMPT_RT_BASE -- init_waitqueue_head(&cpu_base->wait); --#endif -+ spin_lock_init(&cpu_base->softirq_expiry_lock); - return 0; - } - -diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c -index 55b0e58368bf..a5ff222df4c7 100644 ---- 
a/kernel/time/itimer.c -+++ b/kernel/time/itimer.c -@@ -215,7 +215,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue) - /* We are sharing ->siglock with it_real_fn() */ - if (hrtimer_try_to_cancel(timer) < 0) { - spin_unlock_irq(&tsk->sighand->siglock); -- hrtimer_wait_for_timer(&tsk->signal->real_timer); -+ hrtimer_grab_expiry_lock(timer); - goto again; - } - expires = timeval_to_ktime(value->it_value); -diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c -index 8d95e8de98b2..765e700962ab 100644 ---- a/kernel/time/posix-cpu-timers.c -+++ b/kernel/time/posix-cpu-timers.c -@@ -792,6 +792,7 @@ check_timers_list(struct list_head *timers, - return t->expires; - - t->firing = 1; -+ t->firing_cpu = smp_processor_id(); - list_move_tail(&t->entry, firing); - } - -@@ -1138,6 +1139,20 @@ static inline int fastpath_timer_check(struct task_struct *tsk) - return 0; - } - -+static DEFINE_PER_CPU(spinlock_t, cpu_timer_expiry_lock) = __SPIN_LOCK_UNLOCKED(cpu_timer_expiry_lock); -+ -+void cpu_timers_grab_expiry_lock(struct k_itimer *timer) -+{ -+ int cpu = timer->it.cpu.firing_cpu; -+ -+ if (cpu >= 0) { -+ spinlock_t *expiry_lock = per_cpu_ptr(&cpu_timer_expiry_lock, cpu); -+ -+ spin_lock_irq(expiry_lock); -+ spin_unlock_irq(expiry_lock); -+ } -+} -+ - /* - * This is called from the timer interrupt handler. The irq handler has - * already updated our counts. We need to check if any timers fire now. -@@ -1148,6 +1163,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) - LIST_HEAD(firing); - struct k_itimer *timer, *next; - unsigned long flags; -+ spinlock_t *expiry_lock; - - /* - * The fast path checks that there are no expired thread or thread -@@ -1156,6 +1172,9 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) - if (!fastpath_timer_check(tsk)) - return; - -+ expiry_lock = this_cpu_ptr(&cpu_timer_expiry_lock); -+ spin_lock(expiry_lock); -+ - if (!lock_task_sighand(tsk, &flags)) - return; - /* -@@ -1190,6 +1209,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) - list_del_init(&timer->it.cpu.entry); - cpu_firing = timer->it.cpu.firing; - timer->it.cpu.firing = 0; -+ timer->it.cpu.firing_cpu = -1; - /* - * The firing flag is -1 if we collided with a reset - * of the timer, which already reported this -@@ -1199,6 +1219,7 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) - cpu_timer_fire(timer); - spin_unlock(&timer->it_lock); - } -+ spin_unlock(expiry_lock); - } - - #ifdef CONFIG_PREEMPT_RT_BASE -@@ -1466,6 +1487,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, - spin_unlock_irq(&timer.it_lock); - - while (error == TIMER_RETRY) { -+ -+ cpu_timers_grab_expiry_lock(&timer); - /* - * We need to handle case when timer was or is in the - * middle of firing. In other cases we already freed -diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c -index a5ec421e3437..c7e97d421590 100644 ---- a/kernel/time/posix-timers.c -+++ b/kernel/time/posix-timers.c -@@ -821,25 +821,20 @@ static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires, - hrtimer_start_expires(timer, HRTIMER_MODE_ABS); - } - --/* -- * Protected by RCU! 
-- */ --static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timr) -+static int common_hrtimer_try_to_cancel(struct k_itimer *timr) - { --#ifdef CONFIG_PREEMPT_RT_FULL -- if (kc->timer_arm == common_hrtimer_arm) -- hrtimer_wait_for_timer(&timr->it.real.timer); -- else if (kc == &alarm_clock) -- hrtimer_wait_for_timer(&timr->it.alarm.alarmtimer.timer); -- else -- /* FIXME: Whacky hack for posix-cpu-timers */ -- schedule_timeout(1); --#endif -+ return hrtimer_try_to_cancel(&timr->it.real.timer); - } - --static int common_hrtimer_try_to_cancel(struct k_itimer *timr) -+static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timer) - { -- return hrtimer_try_to_cancel(&timr->it.real.timer); -+ if (kc->timer_arm == common_hrtimer_arm) -+ hrtimer_grab_expiry_lock(&timer->it.real.timer); -+ else if (kc == &alarm_clock) -+ hrtimer_grab_expiry_lock(&timer->it.alarm.alarmtimer.timer); -+ else -+ /* posix-cpu-timers */ -+ cpu_timers_grab_expiry_lock(timer); - } - - /* Set a POSIX.1b interval timer. */ -@@ -901,21 +896,21 @@ static int do_timer_settime(timer_t timer_id, int flags, - if (!timr) - return -EINVAL; - -- rcu_read_lock(); - kc = timr->kclock; - if (WARN_ON_ONCE(!kc || !kc->timer_set)) - error = -EINVAL; - else - error = kc->timer_set(timr, flags, new_spec64, old_spec64); - -- unlock_timer(timr, flag); - if (error == TIMER_RETRY) { -+ rcu_read_lock(); -+ unlock_timer(timr, flag); - timer_wait_for_callback(kc, timr); -- old_spec64 = NULL; // We already got the old time... - rcu_read_unlock(); -+ old_spec64 = NULL; // We already got the old time... - goto retry; - } -- rcu_read_unlock(); -+ unlock_timer(timr, flag); - - return error; - } -@@ -977,13 +972,21 @@ int common_timer_del(struct k_itimer *timer) - return 0; - } - --static inline int timer_delete_hook(struct k_itimer *timer) -+static int timer_delete_hook(struct k_itimer *timer) - { - const struct k_clock *kc = timer->kclock; -+ int ret; - - if (WARN_ON_ONCE(!kc || !kc->timer_del)) - return -EINVAL; -- return kc->timer_del(timer); -+ ret = kc->timer_del(timer); -+ if (ret == TIMER_RETRY) { -+ rcu_read_lock(); -+ spin_unlock_irq(&timer->it_lock); -+ timer_wait_for_callback(kc, timer); -+ rcu_read_unlock(); -+ } -+ return ret; - } - - /* Delete a POSIX.1b interval timer. 
*/ -@@ -997,15 +1000,8 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id) - if (!timer) - return -EINVAL; - -- rcu_read_lock(); -- if (timer_delete_hook(timer) == TIMER_RETRY) { -- unlock_timer(timer, flags); -- timer_wait_for_callback(clockid_to_kclock(timer->it_clock), -- timer); -- rcu_read_unlock(); -+ if (timer_delete_hook(timer) == TIMER_RETRY) - goto retry_delete; -- } -- rcu_read_unlock(); - - spin_lock(¤t->sighand->siglock); - list_del(&timer->list); -@@ -1031,20 +1027,9 @@ static void itimer_delete(struct k_itimer *timer) - retry_delete: - spin_lock_irqsave(&timer->it_lock, flags); - -- /* On RT we can race with a deletion */ -- if (!timer->it_signal) { -- unlock_timer(timer, flags); -- return; -- } -- -- if (timer_delete_hook(timer) == TIMER_RETRY) { -- rcu_read_lock(); -- unlock_timer(timer, flags); -- timer_wait_for_callback(clockid_to_kclock(timer->it_clock), -- timer); -- rcu_read_unlock(); -+ if (timer_delete_hook(timer) == TIMER_RETRY) - goto retry_delete; -- } -+ - list_del(&timer->list); - /* - * This keeps any tasks waiting on the spin lock from thinking -diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h -index ddb21145211a..725bd230a8db 100644 ---- a/kernel/time/posix-timers.h -+++ b/kernel/time/posix-timers.h -@@ -32,6 +32,8 @@ extern const struct k_clock clock_process; - extern const struct k_clock clock_thread; - extern const struct k_clock alarm_clock; - -+extern void cpu_timers_grab_expiry_lock(struct k_itimer *timer); -+ - int posix_timer_event(struct k_itimer *timr, int si_private); - - void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting); -diff --git a/kernel/time/timer.c b/kernel/time/timer.c -index 2fcd56aa6092..1c67aab34ff5 100644 ---- a/kernel/time/timer.c -+++ b/kernel/time/timer.c -@@ -44,7 +44,6 @@ - #include - #include - #include --#include - - #include - #include -@@ -198,9 +197,7 @@ EXPORT_SYMBOL(jiffies_64); - struct timer_base { - raw_spinlock_t lock; - struct timer_list *running_timer; --#ifdef CONFIG_PREEMPT_RT_FULL -- struct swait_queue_head wait_for_running_timer; --#endif -+ spinlock_t expiry_lock; - unsigned long clk; - unsigned long next_expiry; - unsigned int cpu; -@@ -1189,33 +1186,6 @@ void add_timer_on(struct timer_list *timer, int cpu) - } - EXPORT_SYMBOL_GPL(add_timer_on); - --#ifdef CONFIG_PREEMPT_RT_FULL --/* -- * Wait for a running timer -- */ --static void wait_for_running_timer(struct timer_list *timer) --{ -- struct timer_base *base; -- u32 tf = timer->flags; -- -- if (tf & TIMER_MIGRATING) -- return; -- -- base = get_timer_base(tf); -- swait_event_exclusive(base->wait_for_running_timer, -- base->running_timer != timer); --} -- --# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer) --#else --static inline void wait_for_running_timer(struct timer_list *timer) --{ -- cpu_relax(); --} -- --# define wakeup_timer_waiters(b) do { } while (0) --#endif -- - /** - * del_timer - deactivate a timer. - * @timer: the timer to be deactivated -@@ -1245,14 +1215,8 @@ int del_timer(struct timer_list *timer) - } - EXPORT_SYMBOL(del_timer); - --/** -- * try_to_del_timer_sync - Try to deactivate a timer -- * @timer: timer to delete -- * -- * This function tries to deactivate a timer. Upon successful (ret >= 0) -- * exit the timer is not queued and the handler is not running on any CPU. 
-- */ --int try_to_del_timer_sync(struct timer_list *timer) -+static int __try_to_del_timer_sync(struct timer_list *timer, -+ struct timer_base **basep) - { - struct timer_base *base; - unsigned long flags; -@@ -1260,7 +1224,7 @@ int try_to_del_timer_sync(struct timer_list *timer) - - debug_assert_init(timer); - -- base = lock_timer_base(timer, &flags); -+ *basep = base = lock_timer_base(timer, &flags); - - if (base->running_timer != timer) - ret = detach_if_pending(timer, base, true); -@@ -1269,9 +1233,42 @@ int try_to_del_timer_sync(struct timer_list *timer) - - return ret; - } -+ -+/** -+ * try_to_del_timer_sync - Try to deactivate a timer -+ * @timer: timer to delete -+ * -+ * This function tries to deactivate a timer. Upon successful (ret >= 0) -+ * exit the timer is not queued and the handler is not running on any CPU. -+ */ -+int try_to_del_timer_sync(struct timer_list *timer) -+{ -+ struct timer_base *base; -+ -+ return __try_to_del_timer_sync(timer, &base); -+} - EXPORT_SYMBOL(try_to_del_timer_sync); - - #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) -+static int __del_timer_sync(struct timer_list *timer) -+{ -+ struct timer_base *base; -+ int ret; -+ -+ for (;;) { -+ ret = __try_to_del_timer_sync(timer, &base); -+ if (ret >= 0) -+ return ret; -+ -+ /* -+ * When accessing the lock, timers of base are no longer expired -+ * and so timer is no longer running. -+ */ -+ spin_lock(&base->expiry_lock); -+ spin_unlock(&base->expiry_lock); -+ } -+} -+ - /** - * del_timer_sync - deactivate a timer and wait for the handler to finish. - * @timer: the timer to be deactivated -@@ -1327,12 +1324,8 @@ int del_timer_sync(struct timer_list *timer) - * could lead to deadlock. - */ - WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE)); -- for (;;) { -- int ret = try_to_del_timer_sync(timer); -- if (ret >= 0) -- return ret; -- wait_for_running_timer(timer); -- } -+ -+ return __del_timer_sync(timer); - } - EXPORT_SYMBOL(del_timer_sync); - #endif -@@ -1397,11 +1390,15 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head) - raw_spin_unlock(&base->lock); - call_timer_fn(timer, fn); - base->running_timer = NULL; -+ spin_unlock(&base->expiry_lock); -+ spin_lock(&base->expiry_lock); - raw_spin_lock(&base->lock); - } else { - raw_spin_unlock_irq(&base->lock); - call_timer_fn(timer, fn); - base->running_timer = NULL; -+ spin_unlock(&base->expiry_lock); -+ spin_lock(&base->expiry_lock); - raw_spin_lock_irq(&base->lock); - } - } -@@ -1698,6 +1695,7 @@ static inline void __run_timers(struct timer_base *base) - if (!time_after_eq(jiffies, base->clk)) - return; - -+ spin_lock(&base->expiry_lock); - raw_spin_lock_irq(&base->lock); - - /* -@@ -1725,7 +1723,7 @@ static inline void __run_timers(struct timer_base *base) - expire_timers(base, heads + levels); - } - raw_spin_unlock_irq(&base->lock); -- wakeup_timer_waiters(base); -+ spin_unlock(&base->expiry_lock); - } - - /* -@@ -1972,9 +1970,7 @@ static void __init init_timer_cpu(int cpu) - base->cpu = cpu; - raw_spin_lock_init(&base->lock); - base->clk = jiffies; --#ifdef CONFIG_PREEMPT_RT_FULL -- init_swait_queue_head(&base->wait_for_running_timer); --#endif -+ spin_lock_init(&base->expiry_lock); - } - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch b/kernel/patches-4.19.x-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch deleted file mode 100644 index 43fd36ab9..000000000 --- 
a/kernel/patches-4.19.x-rt/0276-Revert-futex-Ensure-lock-unlock-symetry-versus-pi_lo.patch +++ /dev/null @@ -1,34 +0,0 @@ -From b53286215979d7a0944e4d1e7483b8ef3b91d1d4 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 26 Jun 2019 17:44:13 +0200 -Subject: [PATCH 276/328] Revert "futex: Ensure lock/unlock symetry versus - pi_lock and hash bucket lock" - -[ Upstream commit 6a773b70cf105b46298ed3b44e77c102ce31d9ec ] - -Drop the RT fixup, the futex code will be changed to avoid the need for -the workaround. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/futex.c | 2 -- - 1 file changed, 2 deletions(-) - -diff --git a/kernel/futex.c b/kernel/futex.c -index 4c448dddce3c..38f53b95e370 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -921,9 +921,7 @@ void exit_pi_state_list(struct task_struct *curr) - if (head->next != next) { - /* retain curr->pi_lock for the loop invariant */ - raw_spin_unlock(&pi_state->pi_mutex.wait_lock); -- raw_spin_unlock_irq(&curr->pi_lock); - spin_unlock(&hb->lock); -- raw_spin_lock_irq(&curr->pi_lock); - put_pi_state(pi_state); - continue; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch b/kernel/patches-4.19.x-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch deleted file mode 100644 index 847fab77f..000000000 --- a/kernel/patches-4.19.x-rt/0277-Revert-futex-Fix-bug-on-when-a-requeued-RT-task-time.patch +++ /dev/null @@ -1,82 +0,0 @@ -From 72c47093b9b237534280c6953d3cc0c635a96d3c Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 26 Jun 2019 17:44:18 +0200 -Subject: [PATCH 277/328] Revert "futex: Fix bug on when a requeued RT task - times out" - -[ Upstream commit f1a170cb3289a48df26cae3c60d77608f7a988bb ] - -Drop the RT fixup, the futex code will be changed to avoid the need for -the workaround. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/locking/rtmutex.c | 31 +------------------------------ - kernel/locking/rtmutex_common.h | 1 - - 2 files changed, 1 insertion(+), 31 deletions(-) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 2a9bf2443acc..7f6f402e04ae 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -144,8 +144,7 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) - - static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) - { -- return waiter && waiter != PI_WAKEUP_INPROGRESS && -- waiter != PI_REQUEUE_INPROGRESS; -+ return waiter && waiter != PI_WAKEUP_INPROGRESS; - } - - /* -@@ -2350,34 +2349,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, - if (try_to_take_rt_mutex(lock, task, NULL)) - return 1; - --#ifdef CONFIG_PREEMPT_RT_FULL -- /* -- * In PREEMPT_RT there's an added race. -- * If the task, that we are about to requeue, times out, -- * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue -- * to skip this task. But right after the task sets -- * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then -- * block on the spin_lock(&hb->lock), which in RT is an rtmutex. -- * This will replace the PI_WAKEUP_INPROGRESS with the actual -- * lock that it blocks on. We *must not* place this task -- * on this proxy lock in that case. -- * -- * To prevent this race, we first take the task's pi_lock -- * and check if it has updated its pi_blocked_on. If it has, -- * we assume that it woke up and we return -EAGAIN. 
-- * Otherwise, we set the task's pi_blocked_on to -- * PI_REQUEUE_INPROGRESS, so that if the task is waking up -- * it will know that we are in the process of requeuing it. -- */ -- raw_spin_lock(&task->pi_lock); -- if (task->pi_blocked_on) { -- raw_spin_unlock(&task->pi_lock); -- return -EAGAIN; -- } -- task->pi_blocked_on = PI_REQUEUE_INPROGRESS; -- raw_spin_unlock(&task->pi_lock); --#endif -- - /* We enforce deadlock detection for futexes */ - ret = task_blocks_on_rt_mutex(lock, waiter, task, - RT_MUTEX_FULL_CHAINWALK); -diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h -index 546aaf058b9e..a501f3b47081 100644 ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -133,7 +133,6 @@ enum rtmutex_chainwalk { - * PI-futex support (proxy locking functions, etc.): - */ - #define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) --#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) - - extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); - extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch b/kernel/patches-4.19.x-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch deleted file mode 100644 index d7565c9d4..000000000 --- a/kernel/patches-4.19.x-rt/0278-Revert-rtmutex-Handle-the-various-new-futex-race-con.patch +++ /dev/null @@ -1,257 +0,0 @@ -From a5fc42329ec8c08a961d8bc3f1a6d565ec867bcc Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 26 Jun 2019 17:44:21 +0200 -Subject: [PATCH 278/328] Revert "rtmutex: Handle the various new futex race - conditions" - -[ Upstream commit 9e0265c21af4d6388d47dcd5ce20f76ec3a2e468 ] - -Drop the RT fixup, the futex code will be changed to avoid the need for -the workaround. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/futex.c | 77 +++++++-------------------------- - kernel/locking/rtmutex.c | 36 +++------------ - kernel/locking/rtmutex_common.h | 2 - - 3 files changed, 21 insertions(+), 94 deletions(-) - -diff --git a/kernel/futex.c b/kernel/futex.c -index 38f53b95e370..6ee55df4f3de 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -2150,16 +2150,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - requeue_pi_wake_futex(this, &key2, hb2); - drop_count++; - continue; -- } else if (ret == -EAGAIN) { -- /* -- * Waiter was woken by timeout or -- * signal and has set pi_blocked_on to -- * PI_WAKEUP_INPROGRESS before we -- * tried to enqueue it on the rtmutex. -- */ -- this->pi_state = NULL; -- put_pi_state(pi_state); -- continue; - } else if (ret) { - /* - * rt_mutex_start_proxy_lock() detected a -@@ -3238,7 +3228,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - struct hrtimer_sleeper timeout, *to = NULL; - struct futex_pi_state *pi_state = NULL; - struct rt_mutex_waiter rt_waiter; -- struct futex_hash_bucket *hb, *hb2; -+ struct futex_hash_bucket *hb; - union futex_key key2 = FUTEX_KEY_INIT; - struct futex_q q = futex_q_init; - int res, ret; -@@ -3296,55 +3286,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - /* Queue the futex_q, drop the hb lock, wait for wakeup. */ - futex_wait_queue_me(hb, &q, to); - -- /* -- * On RT we must avoid races with requeue and trying to block -- * on two mutexes (hb->lock and uaddr2's rtmutex) by -- * serializing access to pi_blocked_on with pi_lock. 
-- */ -- raw_spin_lock_irq(¤t->pi_lock); -- if (current->pi_blocked_on) { -- /* -- * We have been requeued or are in the process of -- * being requeued. -- */ -- raw_spin_unlock_irq(¤t->pi_lock); -- } else { -- /* -- * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS -- * prevents a concurrent requeue from moving us to the -- * uaddr2 rtmutex. After that we can safely acquire -- * (and possibly block on) hb->lock. -- */ -- current->pi_blocked_on = PI_WAKEUP_INPROGRESS; -- raw_spin_unlock_irq(¤t->pi_lock); -- -- spin_lock(&hb->lock); -- -- /* -- * Clean up pi_blocked_on. We might leak it otherwise -- * when we succeeded with the hb->lock in the fast -- * path. -- */ -- raw_spin_lock_irq(¤t->pi_lock); -- current->pi_blocked_on = NULL; -- raw_spin_unlock_irq(¤t->pi_lock); -- -- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); -- spin_unlock(&hb->lock); -- if (ret) -- goto out_put_keys; -- } -+ spin_lock(&hb->lock); -+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); -+ spin_unlock(&hb->lock); -+ if (ret) -+ goto out_put_keys; - - /* -- * In order to be here, we have either been requeued, are in -- * the process of being requeued, or requeue successfully -- * acquired uaddr2 on our behalf. If pi_blocked_on was -- * non-null above, we may be racing with a requeue. Do not -- * rely on q->lock_ptr to be hb2->lock until after blocking on -- * hb->lock or hb2->lock. The futex_requeue dropped our key1 -- * reference and incremented our key2 reference count. -+ * In order for us to be here, we know our q.key == key2, and since -+ * we took the hb->lock above, we also know that futex_requeue() has -+ * completed and we no longer have to concern ourselves with a wakeup -+ * race with the atomic proxy lock acquisition by the requeue code. The -+ * futex_requeue dropped our key1 reference and incremented our key2 -+ * reference count. - */ -- hb2 = hash_futex(&key2); - - /* Check if the requeue code acquired the second futex for us. */ - if (!q.rt_waiter) { -@@ -3353,8 +3308,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * did a lock-steal - fix up the PI-state in that case. - */ - if (q.pi_state && (q.pi_state->owner != current)) { -- spin_lock(&hb2->lock); -- BUG_ON(&hb2->lock != q.lock_ptr); -+ spin_lock(q.lock_ptr); - ret = fixup_pi_state_owner(uaddr2, &q, current); - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { - pi_state = q.pi_state; -@@ -3365,7 +3319,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * the requeue_pi() code acquired for us. 
- */ - put_pi_state(q.pi_state); -- spin_unlock(&hb2->lock); -+ spin_unlock(q.lock_ptr); - } - } else { - struct rt_mutex *pi_mutex; -@@ -3379,8 +3333,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - pi_mutex = &q.pi_state->pi_mutex; - ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); - -- spin_lock(&hb2->lock); -- BUG_ON(&hb2->lock != q.lock_ptr); -+ spin_lock(q.lock_ptr); - if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) - ret = 0; - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 7f6f402e04ae..44a33057a83a 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -142,11 +142,6 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) - WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); - } - --static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) --{ -- return waiter && waiter != PI_WAKEUP_INPROGRESS; --} -- - /* - * We can speed up the acquire/release, if there's no debugging state to be - * set up. -@@ -420,8 +415,7 @@ int max_lock_depth = 1024; - - static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) - { -- return rt_mutex_real_waiter(p->pi_blocked_on) ? -- p->pi_blocked_on->lock : NULL; -+ return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; - } - - /* -@@ -557,7 +551,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, - * reached or the state of the chain has changed while we - * dropped the locks. - */ -- if (!rt_mutex_real_waiter(waiter)) -+ if (!waiter) - goto out_unlock_pi; - - /* -@@ -1327,22 +1321,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - return -EDEADLK; - - raw_spin_lock(&task->pi_lock); -- /* -- * In the case of futex requeue PI, this will be a proxy -- * lock. The task will wake unaware that it is enqueueed on -- * this lock. Avoid blocking on two locks and corrupting -- * pi_blocked_on via the PI_WAKEUP_INPROGRESS -- * flag. futex_wait_requeue_pi() sets this when it wakes up -- * before requeue (due to a signal or timeout). Do not enqueue -- * the task if PI_WAKEUP_INPROGRESS is set. 
-- */ -- if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { -- raw_spin_unlock(&task->pi_lock); -- return -EAGAIN; -- } -- -- BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); -- - waiter->task = task; - waiter->lock = lock; - waiter->prio = task->prio; -@@ -1366,7 +1344,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - rt_mutex_enqueue_pi(owner, waiter); - - rt_mutex_adjust_prio(owner); -- if (rt_mutex_real_waiter(owner->pi_blocked_on)) -+ if (owner->pi_blocked_on) - chain_walk = 1; - } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { - chain_walk = 1; -@@ -1466,7 +1444,7 @@ static void remove_waiter(struct rt_mutex *lock, - { - bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); - struct task_struct *owner = rt_mutex_owner(lock); -- struct rt_mutex *next_lock = NULL; -+ struct rt_mutex *next_lock; - - lockdep_assert_held(&lock->wait_lock); - -@@ -1492,8 +1470,7 @@ static void remove_waiter(struct rt_mutex *lock, - rt_mutex_adjust_prio(owner); - - /* Store the lock on which owner is blocked or NULL */ -- if (rt_mutex_real_waiter(owner->pi_blocked_on)) -- next_lock = task_blocked_on_lock(owner); -+ next_lock = task_blocked_on_lock(owner); - - raw_spin_unlock(&owner->pi_lock); - -@@ -1529,8 +1506,7 @@ void rt_mutex_adjust_pi(struct task_struct *task) - raw_spin_lock_irqsave(&task->pi_lock, flags); - - waiter = task->pi_blocked_on; -- if (!rt_mutex_real_waiter(waiter) || -- rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { -+ if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { - raw_spin_unlock_irqrestore(&task->pi_lock, flags); - return; - } -diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h -index a501f3b47081..758dc43872e5 100644 ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -132,8 +132,6 @@ enum rtmutex_chainwalk { - /* - * PI-futex support (proxy locking functions, etc.): - */ --#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) -- - extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); - extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch b/kernel/patches-4.19.x-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch deleted file mode 100644 index 43d32b361..000000000 --- a/kernel/patches-4.19.x-rt/0279-Revert-futex-workaround-migrate_disable-enable-in-di.patch +++ /dev/null @@ -1,69 +0,0 @@ -From af703381dea29f442e4b3a7c4d8998379034f59a Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 26 Jun 2019 17:44:27 +0200 -Subject: [PATCH 279/328] Revert "futex: workaround migrate_disable/enable in - different context" - -[ Upstream commit a71221d81cc4873891ae44f3aa02df596079b786 ] - -Drop the RT fixup, the futex code will be changed to avoid the need for -the workaround. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/futex.c | 19 ------------------- - 1 file changed, 19 deletions(-) - -diff --git a/kernel/futex.c b/kernel/futex.c -index 6ee55df4f3de..f636dcc706ec 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -2878,14 +2878,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - * before __rt_mutex_start_proxy_lock() is done. 
- */ - raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); -- /* -- * the migrate_disable() here disables migration in the in_atomic() fast -- * path which is enabled again in the following spin_unlock(). We have -- * one migrate_disable() pending in the slow-path which is reversed -- * after the raw_spin_unlock_irq() where we leave the atomic context. -- */ -- migrate_disable(); -- - spin_unlock(q.lock_ptr); - /* - * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter -@@ -2894,7 +2886,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - */ - ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); - raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); -- migrate_enable(); - - if (ret) { - if (ret == 1) -@@ -3043,21 +3034,11 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) - * rt_waiter. Also see the WARN in wake_futex_pi(). - */ - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); -- /* -- * Magic trickery for now to make the RT migrate disable -- * logic happy. The following spin_unlock() happens with -- * interrupts disabled so the internal migrate_enable() -- * won't undo the migrate_disable() which was issued when -- * locking hb->lock. -- */ -- migrate_disable(); - spin_unlock(&hb->lock); - - /* drops pi_state->pi_mutex.wait_lock */ - ret = wake_futex_pi(uaddr, uval, pi_state); - -- migrate_enable(); -- - put_pi_state(pi_state); - - /* --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch b/kernel/patches-4.19.x-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch deleted file mode 100644 index 694760499..000000000 --- a/kernel/patches-4.19.x-rt/0280-futex-Make-the-futex_hash_bucket-lock-raw.patch +++ /dev/null @@ -1,348 +0,0 @@ -From 44953b81eb56f8bf4910a8d447d7e3695c25370e Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 26 Jun 2019 11:59:44 +0200 -Subject: [PATCH 280/328] futex: Make the futex_hash_bucket lock raw - -[ Upstream commit f646521aadedab78801c9befe193e2e8a0c99298 ] - -Since commit 1a1fb985f2e2b ("futex: Handle early deadlock return -correctly") we can deadlock while we attempt to acquire the HB lock if -we fail to acquire the lock. -The RT waiter (for the futex lock) is still enqueued and acquiring the -HB lock may build up a lock chain which leads to a deadlock if the owner -of the lock futex-lock holds the HB lock. - -Make the hash bucket lock raw so it does not participate in the -lockchain. 
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/futex.c | 88 +++++++++++++++++++++++++------------------------- - 1 file changed, 44 insertions(+), 44 deletions(-) - -diff --git a/kernel/futex.c b/kernel/futex.c -index f636dcc706ec..a9d9283605e5 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -243,7 +243,7 @@ struct futex_q { - struct plist_node list; - - struct task_struct *task; -- spinlock_t *lock_ptr; -+ raw_spinlock_t *lock_ptr; - union futex_key key; - struct futex_pi_state *pi_state; - struct rt_mutex_waiter *rt_waiter; -@@ -264,7 +264,7 @@ static const struct futex_q futex_q_init = { - */ - struct futex_hash_bucket { - atomic_t waiters; -- spinlock_t lock; -+ raw_spinlock_t lock; - struct plist_head chain; - } ____cacheline_aligned_in_smp; - -@@ -911,7 +911,7 @@ void exit_pi_state_list(struct task_struct *curr) - } - raw_spin_unlock_irq(&curr->pi_lock); - -- spin_lock(&hb->lock); -+ raw_spin_lock(&hb->lock); - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - raw_spin_lock(&curr->pi_lock); - /* -@@ -921,7 +921,7 @@ void exit_pi_state_list(struct task_struct *curr) - if (head->next != next) { - /* retain curr->pi_lock for the loop invariant */ - raw_spin_unlock(&pi_state->pi_mutex.wait_lock); -- spin_unlock(&hb->lock); -+ raw_spin_unlock(&hb->lock); - put_pi_state(pi_state); - continue; - } -@@ -933,7 +933,7 @@ void exit_pi_state_list(struct task_struct *curr) - - raw_spin_unlock(&curr->pi_lock); - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); -- spin_unlock(&hb->lock); -+ raw_spin_unlock(&hb->lock); - - rt_mutex_futex_unlock(&pi_state->pi_mutex); - put_pi_state(pi_state); -@@ -1427,7 +1427,7 @@ static void __unqueue_futex(struct futex_q *q) - { - struct futex_hash_bucket *hb; - -- if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) -+ if (WARN_ON_SMP(!q->lock_ptr || !raw_spin_is_locked(q->lock_ptr)) - || WARN_ON(plist_node_empty(&q->list))) - return; - -@@ -1555,21 +1555,21 @@ static inline void - double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) - { - if (hb1 <= hb2) { -- spin_lock(&hb1->lock); -+ raw_spin_lock(&hb1->lock); - if (hb1 < hb2) -- spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); -+ raw_spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); - } else { /* hb1 > hb2 */ -- spin_lock(&hb2->lock); -- spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); -+ raw_spin_lock(&hb2->lock); -+ raw_spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); - } - } - - static inline void - double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) - { -- spin_unlock(&hb1->lock); -+ raw_spin_unlock(&hb1->lock); - if (hb1 != hb2) -- spin_unlock(&hb2->lock); -+ raw_spin_unlock(&hb2->lock); - } - - /* -@@ -1597,7 +1597,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) - if (!hb_waiters_pending(hb)) - goto out_put_key; - -- spin_lock(&hb->lock); -+ raw_spin_lock(&hb->lock); - - plist_for_each_entry_safe(this, next, &hb->chain, list) { - if (match_futex (&this->key, &key)) { -@@ -1616,7 +1616,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) - } - } - -- spin_unlock(&hb->lock); -+ raw_spin_unlock(&hb->lock); - wake_up_q(&wake_q); - out_put_key: - put_futex_key(&key); -@@ -2221,7 +2221,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) - - q->lock_ptr = &hb->lock; - -- spin_lock(&hb->lock); /* implies smp_mb(); (A) */ -+ raw_spin_lock(&hb->lock); /* implies smp_mb(); (A) */ - return hb; - } - -@@ 
-2229,7 +2229,7 @@ static inline void - queue_unlock(struct futex_hash_bucket *hb) - __releases(&hb->lock) - { -- spin_unlock(&hb->lock); -+ raw_spin_unlock(&hb->lock); - hb_waiters_dec(hb); - } - -@@ -2268,7 +2268,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) - __releases(&hb->lock) - { - __queue_me(q, hb); -- spin_unlock(&hb->lock); -+ raw_spin_unlock(&hb->lock); - } - - /** -@@ -2284,41 +2284,41 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) - */ - static int unqueue_me(struct futex_q *q) - { -- spinlock_t *lock_ptr; -+ raw_spinlock_t *lock_ptr; - int ret = 0; - - /* In the common case we don't take the spinlock, which is nice. */ - retry: - /* -- * q->lock_ptr can change between this read and the following spin_lock. -- * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and -- * optimizing lock_ptr out of the logic below. -+ * q->lock_ptr can change between this read and the following -+ * raw_spin_lock. Use READ_ONCE to forbid the compiler from reloading -+ * q->lock_ptr and optimizing lock_ptr out of the logic below. - */ - lock_ptr = READ_ONCE(q->lock_ptr); - if (lock_ptr != NULL) { -- spin_lock(lock_ptr); -+ raw_spin_lock(lock_ptr); - /* - * q->lock_ptr can change between reading it and -- * spin_lock(), causing us to take the wrong lock. This -+ * raw_spin_lock(), causing us to take the wrong lock. This - * corrects the race condition. - * - * Reasoning goes like this: if we have the wrong lock, - * q->lock_ptr must have changed (maybe several times) -- * between reading it and the spin_lock(). It can -- * change again after the spin_lock() but only if it was -- * already changed before the spin_lock(). It cannot, -+ * between reading it and the raw_spin_lock(). It can -+ * change again after the raw_spin_lock() but only if it was -+ * already changed before the raw_spin_lock(). It cannot, - * however, change back to the original value. Therefore - * we can detect whether we acquired the correct lock. - */ - if (unlikely(lock_ptr != q->lock_ptr)) { -- spin_unlock(lock_ptr); -+ raw_spin_unlock(lock_ptr); - goto retry; - } - __unqueue_futex(q); - - BUG_ON(q->pi_state); - -- spin_unlock(lock_ptr); -+ raw_spin_unlock(lock_ptr); - ret = 1; - } - -@@ -2340,7 +2340,7 @@ static void unqueue_me_pi(struct futex_q *q) - put_pi_state(q->pi_state); - q->pi_state = NULL; - -- spin_unlock(q->lock_ptr); -+ raw_spin_unlock(q->lock_ptr); - } - - static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, -@@ -2473,7 +2473,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, - */ - handle_err: - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); -- spin_unlock(q->lock_ptr); -+ raw_spin_unlock(q->lock_ptr); - - switch (err) { - case -EFAULT: -@@ -2491,7 +2491,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, - break; - } - -- spin_lock(q->lock_ptr); -+ raw_spin_lock(q->lock_ptr); - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - - /* -@@ -2587,7 +2587,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, - /* - * The task state is guaranteed to be set before another task can - * wake it. set_current_state() is implemented using smp_store_mb() and -- * queue_me() calls spin_unlock() upon completion, both serializing -+ * queue_me() calls raw_spin_unlock() upon completion, both serializing - * access to the hash list and forcing another memory barrier. 
- */ - set_current_state(TASK_INTERRUPTIBLE); -@@ -2878,7 +2878,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - * before __rt_mutex_start_proxy_lock() is done. - */ - raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); -- spin_unlock(q.lock_ptr); -+ raw_spin_unlock(q.lock_ptr); - /* - * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter - * such that futex_unlock_pi() is guaranteed to observe the waiter when -@@ -2899,7 +2899,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); - - cleanup: -- spin_lock(q.lock_ptr); -+ raw_spin_lock(q.lock_ptr); - /* - * If we failed to acquire the lock (deadlock/signal/timeout), we must - * first acquire the hb->lock before removing the lock from the -@@ -3000,7 +3000,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) - return ret; - - hb = hash_futex(&key); -- spin_lock(&hb->lock); -+ raw_spin_lock(&hb->lock); - - /* - * Check waiters first. We do not trust user space values at -@@ -3034,7 +3034,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) - * rt_waiter. Also see the WARN in wake_futex_pi(). - */ - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); -- spin_unlock(&hb->lock); -+ raw_spin_unlock(&hb->lock); - - /* drops pi_state->pi_mutex.wait_lock */ - ret = wake_futex_pi(uaddr, uval, pi_state); -@@ -3073,7 +3073,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) - * owner. - */ - if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) { -- spin_unlock(&hb->lock); -+ raw_spin_unlock(&hb->lock); - switch (ret) { - case -EFAULT: - goto pi_faulted; -@@ -3093,7 +3093,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) - ret = (curval == uval) ? 0 : -EAGAIN; - - out_unlock: -- spin_unlock(&hb->lock); -+ raw_spin_unlock(&hb->lock); - out_putkey: - put_futex_key(&key); - return ret; -@@ -3267,9 +3267,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - /* Queue the futex_q, drop the hb lock, wait for wakeup. */ - futex_wait_queue_me(hb, &q, to); - -- spin_lock(&hb->lock); -+ raw_spin_lock(&hb->lock); - ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); -- spin_unlock(&hb->lock); -+ raw_spin_unlock(&hb->lock); - if (ret) - goto out_put_keys; - -@@ -3289,7 +3289,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * did a lock-steal - fix up the PI-state in that case. - */ - if (q.pi_state && (q.pi_state->owner != current)) { -- spin_lock(q.lock_ptr); -+ raw_spin_lock(q.lock_ptr); - ret = fixup_pi_state_owner(uaddr2, &q, current); - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { - pi_state = q.pi_state; -@@ -3300,7 +3300,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * the requeue_pi() code acquired for us. 
- */ - put_pi_state(q.pi_state); -- spin_unlock(q.lock_ptr); -+ raw_spin_unlock(q.lock_ptr); - } - } else { - struct rt_mutex *pi_mutex; -@@ -3314,7 +3314,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - pi_mutex = &q.pi_state->pi_mutex; - ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); - -- spin_lock(q.lock_ptr); -+ raw_spin_lock(q.lock_ptr); - if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) - ret = 0; - -@@ -3981,7 +3981,7 @@ static int __init futex_init(void) - for (i = 0; i < futex_hashsize; i++) { - atomic_set(&futex_queues[i].waiters, 0); - plist_head_init(&futex_queues[i].chain); -- spin_lock_init(&futex_queues[i].lock); -+ raw_spin_lock_init(&futex_queues[i].lock); - } - - return 0; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0281-futex-Delay-deallocation-of-pi_state.patch b/kernel/patches-4.19.x-rt/0281-futex-Delay-deallocation-of-pi_state.patch deleted file mode 100644 index e9362854c..000000000 --- a/kernel/patches-4.19.x-rt/0281-futex-Delay-deallocation-of-pi_state.patch +++ /dev/null @@ -1,182 +0,0 @@ -From b4da585eac644bd89c26ced3290b61d735d50481 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Wed, 26 Jun 2019 13:35:36 +0200 -Subject: [PATCH 281/328] futex: Delay deallocation of pi_state - -[ Upstream commit d7c7cf8cb68b7df17e6e50be1f25f35d83e686c7 ] - -On -RT we can't invoke kfree() in a non-preemptible context. - -Defer the deallocation of pi_state to preemptible context. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/futex.c | 55 ++++++++++++++++++++++++++++++++++++++++---------- - 1 file changed, 44 insertions(+), 11 deletions(-) - -diff --git a/kernel/futex.c b/kernel/futex.c -index a9d9283605e5..0b8cff8d9162 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -825,13 +825,13 @@ static void get_pi_state(struct futex_pi_state *pi_state) - * Drops a reference to the pi_state object and frees or caches it - * when the last reference is gone. - */ --static void put_pi_state(struct futex_pi_state *pi_state) -+static struct futex_pi_state *__put_pi_state(struct futex_pi_state *pi_state) - { - if (!pi_state) -- return; -+ return NULL; - - if (!atomic_dec_and_test(&pi_state->refcount)) -- return; -+ return NULL; - - /* - * If pi_state->owner is NULL, the owner is most probably dying -@@ -851,9 +851,7 @@ static void put_pi_state(struct futex_pi_state *pi_state) - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - } - -- if (current->pi_state_cache) { -- kfree(pi_state); -- } else { -+ if (!current->pi_state_cache) { - /* - * pi_state->list is already empty. - * clear pi_state->owner. 
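The idiom behind these hunks generalizes: under a raw (non-sleeping) spinlock nothing may sleep and, on -RT, kfree() must not be called either, so objects are only unlinked and parked on a local list while the lock is held, and the actual freeing happens after the unlock, back in preemptible context. A minimal sketch of that collect-then-free pattern follows; struct foo, foo_release_all() and the other names are purely illustrative and not taken from the patch.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative object with an embedded list_head. */
struct foo {
	struct list_head list;
	/* payload */
};

static void foo_release_all(raw_spinlock_t *lock, struct list_head *objects)
{
	struct foo *f, *next;
	LIST_HEAD(to_free);

	raw_spin_lock_irq(lock);		/* non-preemptible section */
	list_for_each_entry_safe(f, next, objects, list)
		list_move(&f->list, &to_free);	/* unlink only, no kfree() here */
	raw_spin_unlock_irq(lock);

	/* preemptible again: now it is safe to free */
	list_for_each_entry_safe(f, next, &to_free, list) {
		list_del(&f->list);
		kfree(f);
	}
}

put_pi_state_atomic() and free_pi_state_list() in the following hunks are this same split applied under the futex hash bucket lock.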
-@@ -862,6 +860,30 @@ static void put_pi_state(struct futex_pi_state *pi_state) - pi_state->owner = NULL; - atomic_set(&pi_state->refcount, 1); - current->pi_state_cache = pi_state; -+ pi_state = NULL; -+ } -+ return pi_state; -+} -+ -+static void put_pi_state(struct futex_pi_state *pi_state) -+{ -+ kfree(__put_pi_state(pi_state)); -+} -+ -+static void put_pi_state_atomic(struct futex_pi_state *pi_state, -+ struct list_head *to_free) -+{ -+ if (__put_pi_state(pi_state)) -+ list_add(&pi_state->list, to_free); -+} -+ -+static void free_pi_state_list(struct list_head *to_free) -+{ -+ struct futex_pi_state *p, *next; -+ -+ list_for_each_entry_safe(p, next, to_free, list) { -+ list_del(&p->list); -+ kfree(p); - } - } - -@@ -878,6 +900,7 @@ void exit_pi_state_list(struct task_struct *curr) - struct futex_pi_state *pi_state; - struct futex_hash_bucket *hb; - union futex_key key = FUTEX_KEY_INIT; -+ LIST_HEAD(to_free); - - if (!futex_cmpxchg_enabled) - return; -@@ -922,7 +945,7 @@ void exit_pi_state_list(struct task_struct *curr) - /* retain curr->pi_lock for the loop invariant */ - raw_spin_unlock(&pi_state->pi_mutex.wait_lock); - raw_spin_unlock(&hb->lock); -- put_pi_state(pi_state); -+ put_pi_state_atomic(pi_state, &to_free); - continue; - } - -@@ -941,6 +964,8 @@ void exit_pi_state_list(struct task_struct *curr) - raw_spin_lock_irq(&curr->pi_lock); - } - raw_spin_unlock_irq(&curr->pi_lock); -+ -+ free_pi_state_list(&to_free); - } - - #endif -@@ -1923,6 +1948,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - struct futex_hash_bucket *hb1, *hb2; - struct futex_q *this, *next; - DEFINE_WAKE_Q(wake_q); -+ LIST_HEAD(to_free); - - if (nr_wake < 0 || nr_requeue < 0) - return -EINVAL; -@@ -2160,7 +2186,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - * object. - */ - this->pi_state = NULL; -- put_pi_state(pi_state); -+ put_pi_state_atomic(pi_state, &to_free); - /* - * We stop queueing more waiters and let user - * space deal with the mess. -@@ -2177,7 +2203,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We - * need to drop it here again. - */ -- put_pi_state(pi_state); -+ put_pi_state_atomic(pi_state, &to_free); - - out_unlock: - double_unlock_hb(hb1, hb2); -@@ -2198,6 +2224,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - out_put_key1: - put_futex_key(&key1); - out: -+ free_pi_state_list(&to_free); - return ret ? ret : task_count; - } - -@@ -2334,13 +2361,16 @@ static int unqueue_me(struct futex_q *q) - static void unqueue_me_pi(struct futex_q *q) - __releases(q->lock_ptr) - { -+ struct futex_pi_state *ps; -+ - __unqueue_futex(q); - - BUG_ON(!q->pi_state); -- put_pi_state(q->pi_state); -+ ps = __put_pi_state(q->pi_state); - q->pi_state = NULL; - - raw_spin_unlock(q->lock_ptr); -+ kfree(ps); - } - - static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, -@@ -3289,6 +3319,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * did a lock-steal - fix up the PI-state in that case. - */ - if (q.pi_state && (q.pi_state->owner != current)) { -+ struct futex_pi_state *ps_free; -+ - raw_spin_lock(q.lock_ptr); - ret = fixup_pi_state_owner(uaddr2, &q, current); - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { -@@ -3299,8 +3331,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * Drop the reference to the pi state which - * the requeue_pi() code acquired for us. 
- */ -- put_pi_state(q.pi_state); -+ ps_free = __put_pi_state(q.pi_state); - raw_spin_unlock(q.lock_ptr); -+ kfree(ps_free); - } - } else { - struct rt_mutex *pi_mutex; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch b/kernel/patches-4.19.x-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch deleted file mode 100644 index 96613b458..000000000 --- a/kernel/patches-4.19.x-rt/0282-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch +++ /dev/null @@ -1,126 +0,0 @@ -From 4ae018a1f3fabfd075da5e5e1c764d47df47c191 Mon Sep 17 00:00:00 2001 -From: "Luis Claudio R. Goncalves" -Date: Tue, 25 Jun 2019 11:28:04 -0300 -Subject: [PATCH 282/328] mm/zswap: Do not disable preemption in - zswap_frontswap_store() - -[ Upstream commit 4e4cf4be79635e67144632d9135286381acbc95a ] - -Zswap causes "BUG: scheduling while atomic" by blocking on a rt_spin_lock() with -preemption disabled. The preemption is disabled by get_cpu_var() in -zswap_frontswap_store() to protect the access of the zswap_dstmem percpu variable. - -Use get_locked_var() to protect the percpu zswap_dstmem variable, making the -code preemptive. - -As get_cpu_ptr() also disables preemption, replace it by this_cpu_ptr() and -remove the counterpart put_cpu_ptr(). - -Steps to Reproduce: - - 1. # grubby --args "zswap.enabled=1" --update-kernel DEFAULT - 2. # reboot - 3. Calculate the amount o memory to be used by the test: - ---> grep MemAvailable /proc/meminfo - ---> Add 25% ~ 50% to that value - 4. # stress --vm 1 --vm-bytes ${MemAvailable+25%} --timeout 240s - -Usually, in less than 5 minutes the backtrace listed below appears, followed -by a kernel panic: - -| BUG: scheduling while atomic: kswapd1/181/0x00000002 -| -| Preemption disabled at: -| [] zswap_frontswap_store+0x21a/0x6e1 -| -| Kernel panic - not syncing: scheduling while atomic -| CPU: 14 PID: 181 Comm: kswapd1 Kdump: loaded Not tainted 5.0.14-rt9 #1 -| Hardware name: AMD Pence/Pence, BIOS WPN2321X_Weekly_12_03_21 03/19/2012 -| Call Trace: -| panic+0x106/0x2a7 -| __schedule_bug.cold+0x3f/0x51 -| __schedule+0x5cb/0x6f0 -| schedule+0x43/0xd0 -| rt_spin_lock_slowlock_locked+0x114/0x2b0 -| rt_spin_lock_slowlock+0x51/0x80 -| zbud_alloc+0x1da/0x2d0 -| zswap_frontswap_store+0x31a/0x6e1 -| __frontswap_store+0xab/0x130 -| swap_writepage+0x39/0x70 -| pageout.isra.0+0xe3/0x320 -| shrink_page_list+0xa8e/0xd10 -| shrink_inactive_list+0x251/0x840 -| shrink_node_memcg+0x213/0x770 -| shrink_node+0xd9/0x450 -| balance_pgdat+0x2d5/0x510 -| kswapd+0x218/0x470 -| kthread+0xfb/0x130 -| ret_from_fork+0x27/0x50 - -Cc: stable-rt@vger.kernel.org -Reported-by: Ping Fang -Signed-off-by: Luis Claudio R. 
Goncalves -Reviewed-by: Daniel Bristot de Oliveira -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - mm/zswap.c | 12 +++++++----- - 1 file changed, 7 insertions(+), 5 deletions(-) - -diff --git a/mm/zswap.c b/mm/zswap.c -index cd91fd9d96b8..420225d3ff0b 100644 ---- a/mm/zswap.c -+++ b/mm/zswap.c -@@ -27,6 +27,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -990,6 +991,8 @@ static void zswap_fill_page(void *ptr, unsigned long value) - memset_l(page, value, PAGE_SIZE / sizeof(unsigned long)); - } - -+/* protect zswap_dstmem from concurrency */ -+static DEFINE_LOCAL_IRQ_LOCK(zswap_dstmem_lock); - /********************************* - * frontswap hooks - **********************************/ -@@ -1066,12 +1069,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, - } - - /* compress */ -- dst = get_cpu_var(zswap_dstmem); -- tfm = *get_cpu_ptr(entry->pool->tfm); -+ dst = get_locked_var(zswap_dstmem_lock, zswap_dstmem); -+ tfm = *this_cpu_ptr(entry->pool->tfm); - src = kmap_atomic(page); - ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen); - kunmap_atomic(src); -- put_cpu_ptr(entry->pool->tfm); - if (ret) { - ret = -EINVAL; - goto put_dstmem; -@@ -1094,7 +1096,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, - memcpy(buf, &zhdr, hlen); - memcpy(buf + hlen, dst, dlen); - zpool_unmap_handle(entry->pool->zpool, handle); -- put_cpu_var(zswap_dstmem); -+ put_locked_var(zswap_dstmem_lock, zswap_dstmem); - - /* populate entry */ - entry->offset = offset; -@@ -1122,7 +1124,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, - return 0; - - put_dstmem: -- put_cpu_var(zswap_dstmem); -+ put_locked_var(zswap_dstmem_lock, zswap_dstmem); - zswap_pool_put(entry->pool); - freepage: - zswap_entry_cache_free(entry); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0283-revert-aio.patch b/kernel/patches-4.19.x-rt/0283-revert-aio.patch deleted file mode 100644 index c0666290e..000000000 --- a/kernel/patches-4.19.x-rt/0283-revert-aio.patch +++ /dev/null @@ -1,70 +0,0 @@ -From ab2dc29034ff92024537b768305a45a2d4b138fb Mon Sep 17 00:00:00 2001 -From: "Steven Rostedt (VMware)" -Date: Fri, 20 Sep 2019 17:50:53 -0400 -Subject: [PATCH 283/328] revert-aio - -revert: fs/aio: simple simple work - -Signed-off-by: Steven Rostedt (VMware) ---- - fs/aio.c | 15 ++------------- - 1 file changed, 2 insertions(+), 13 deletions(-) - -diff --git a/fs/aio.c b/fs/aio.c -index 93f8cf7fdeab..b5fbf2061868 100644 ---- a/fs/aio.c -+++ b/fs/aio.c -@@ -42,7 +42,6 @@ - #include - #include - #include --#include - - #include - #include -@@ -122,7 +121,6 @@ struct kioctx { - long nr_pages; - - struct rcu_work free_rwork; /* see free_ioctx() */ -- struct swork_event free_swork; /* see free_ioctx() */ - - /* - * signals when all in-flight requests are done -@@ -267,7 +265,6 @@ static int __init aio_setup(void) - .mount = aio_mount, - .kill_sb = kill_anon_super, - }; -- BUG_ON(swork_get()); - aio_mnt = kern_mount(&aio_fs); - if (IS_ERR(aio_mnt)) - panic("Failed to create aio fs mount."); -@@ -609,9 +606,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) - * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - - * now it's safe to cancel any that need to be. 
- */ --static void free_ioctx_users_work(struct swork_event *sev) -+static void free_ioctx_users(struct percpu_ref *ref) - { -- struct kioctx *ctx = container_of(sev, struct kioctx, free_swork); -+ struct kioctx *ctx = container_of(ref, struct kioctx, users); - struct aio_kiocb *req; - - spin_lock_irq(&ctx->ctx_lock); -@@ -629,14 +626,6 @@ static void free_ioctx_users_work(struct swork_event *sev) - percpu_ref_put(&ctx->reqs); - } - --static void free_ioctx_users(struct percpu_ref *ref) --{ -- struct kioctx *ctx = container_of(ref, struct kioctx, users); -- -- INIT_SWORK(&ctx->free_swork, free_ioctx_users_work); -- swork_queue(&ctx->free_swork); --} -- - static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) - { - unsigned i, new_nr; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0284-fs-aio-simple-simple-work.patch b/kernel/patches-4.19.x-rt/0284-fs-aio-simple-simple-work.patch deleted file mode 100644 index 9ba1a068a..000000000 --- a/kernel/patches-4.19.x-rt/0284-fs-aio-simple-simple-work.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 81433e7d902d1cc6192315d52f22c51ab06a2e63 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 16 Feb 2015 18:49:10 +0100 -Subject: [PATCH 284/328] fs/aio: simple simple work - -[ Upstream commit 1a142116f6435ef070ecebb66d2d599507c10601 ] - -|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768 -|in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2 -|2 locks held by rcuos/2/26: -| #0: (rcu_callback){.+.+..}, at: [] rcu_nocb_kthread+0x1e2/0x380 -| #1: (rcu_read_lock_sched){.+.+..}, at: [] percpu_ref_kill_rcu+0xa6/0x1c0 -|Preemption disabled at:[] rcu_nocb_kthread+0x263/0x380 -|Call Trace: -| [] dump_stack+0x4e/0x9c -| [] __might_sleep+0xfb/0x170 -| [] rt_spin_lock+0x24/0x70 -| [] free_ioctx_users+0x30/0x130 -| [] percpu_ref_kill_rcu+0x1b4/0x1c0 -| [] rcu_nocb_kthread+0x263/0x380 -| [] kthread+0xd6/0xf0 -| [] ret_from_fork+0x7c/0xb0 - -replace this preempt_disable() friendly swork. - -Reported-By: Mike Galbraith -Suggested-by: Benjamin LaHaise -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - fs/aio.c | 13 +++++++++++-- - 1 file changed, 11 insertions(+), 2 deletions(-) - -diff --git a/fs/aio.c b/fs/aio.c -index b5fbf2061868..a92119e05869 100644 ---- a/fs/aio.c -+++ b/fs/aio.c -@@ -121,6 +121,7 @@ struct kioctx { - long nr_pages; - - struct rcu_work free_rwork; /* see free_ioctx() */ -+ struct kthread_work free_kwork; /* see free_ioctx() */ - - /* - * signals when all in-flight requests are done -@@ -606,9 +607,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) - * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - - * now it's safe to cancel any that need to be. 
- */ --static void free_ioctx_users(struct percpu_ref *ref) -+static void free_ioctx_users_work(struct kthread_work *work) - { -- struct kioctx *ctx = container_of(ref, struct kioctx, users); -+ struct kioctx *ctx = container_of(work, struct kioctx, free_kwork); - struct aio_kiocb *req; - - spin_lock_irq(&ctx->ctx_lock); -@@ -626,6 +627,14 @@ static void free_ioctx_users(struct percpu_ref *ref) - percpu_ref_put(&ctx->reqs); - } - -+static void free_ioctx_users(struct percpu_ref *ref) -+{ -+ struct kioctx *ctx = container_of(ref, struct kioctx, users); -+ -+ kthread_init_work(&ctx->free_kwork, free_ioctx_users_work); -+ kthread_schedule_work(&ctx->free_kwork); -+} -+ - static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) - { - unsigned i, new_nr; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0285-revert-thermal.patch b/kernel/patches-4.19.x-rt/0285-revert-thermal.patch deleted file mode 100644 index 42dcce5ec..000000000 --- a/kernel/patches-4.19.x-rt/0285-revert-thermal.patch +++ /dev/null @@ -1,119 +0,0 @@ -From c02161642bb7a1a67a5deeca8370d86f1901a30c Mon Sep 17 00:00:00 2001 -From: "Steven Rostedt (VMware)" -Date: Fri, 20 Sep 2019 17:50:53 -0400 -Subject: [PATCH 285/328] revert-thermal - -Revert: thermal: Defer thermal wakups to threads - -Signed-off-by: Steven Rostedt (VMware) ---- - drivers/thermal/x86_pkg_temp_thermal.c | 52 ++------------------------ - 1 file changed, 3 insertions(+), 49 deletions(-) - -diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c -index a5991cbb408f..1ef937d799e4 100644 ---- a/drivers/thermal/x86_pkg_temp_thermal.c -+++ b/drivers/thermal/x86_pkg_temp_thermal.c -@@ -29,7 +29,6 @@ - #include - #include - #include --#include - #include - #include - -@@ -330,7 +329,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work) - schedule_delayed_work_on(cpu, work, ms); - } - --static void pkg_thermal_notify_work(struct swork_event *event) -+static int pkg_thermal_notify(u64 msr_val) - { - int cpu = smp_processor_id(); - struct pkg_device *pkgdev; -@@ -349,47 +348,9 @@ static void pkg_thermal_notify_work(struct swork_event *event) - } - - spin_unlock_irqrestore(&pkg_temp_lock, flags); --} -- --#ifdef CONFIG_PREEMPT_RT_FULL --static struct swork_event notify_work; -- --static int pkg_thermal_notify_work_init(void) --{ -- int err; -- -- err = swork_get(); -- if (err) -- return err; -- -- INIT_SWORK(¬ify_work, pkg_thermal_notify_work); - return 0; - } - --static void pkg_thermal_notify_work_cleanup(void) --{ -- swork_put(); --} -- --static int pkg_thermal_notify(u64 msr_val) --{ -- swork_queue(¬ify_work); -- return 0; --} -- --#else /* !CONFIG_PREEMPT_RT_FULL */ -- --static int pkg_thermal_notify_work_init(void) { return 0; } -- --static void pkg_thermal_notify_work_cleanup(void) { } -- --static int pkg_thermal_notify(u64 msr_val) --{ -- pkg_thermal_notify_work(NULL); -- return 0; --} --#endif /* CONFIG_PREEMPT_RT_FULL */ -- - static int pkg_temp_thermal_device_add(unsigned int cpu) - { - int pkgid = topology_logical_package_id(cpu); -@@ -554,16 +515,11 @@ static int __init pkg_temp_thermal_init(void) - if (!x86_match_cpu(pkg_temp_thermal_ids)) - return -ENODEV; - -- if (!pkg_thermal_notify_work_init()) -- return -ENODEV; -- - max_packages = topology_max_packages(); - packages = kcalloc(max_packages, sizeof(struct pkg_device *), - GFP_KERNEL); -- if (!packages) { -- ret = -ENOMEM; -- goto err; -- } -+ if (!packages) -+ return -ENOMEM; - - ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, 
"thermal/x86_pkg:online", - pkg_thermal_cpu_online, pkg_thermal_cpu_offline); -@@ -581,7 +537,6 @@ static int __init pkg_temp_thermal_init(void) - return 0; - - err: -- pkg_thermal_notify_work_cleanup(); - kfree(packages); - return ret; - } -@@ -595,7 +550,6 @@ static void __exit pkg_temp_thermal_exit(void) - cpuhp_remove_state(pkg_thermal_hp_state); - debugfs_remove_recursive(debugfs); - kfree(packages); -- pkg_thermal_notify_work_cleanup(); - } - module_exit(pkg_temp_thermal_exit) - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch b/kernel/patches-4.19.x-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch deleted file mode 100644 index 9a74d9dcb..000000000 --- a/kernel/patches-4.19.x-rt/0286-thermal-Defer-thermal-wakups-to-threads.patch +++ /dev/null @@ -1,97 +0,0 @@ -From 6ce01aa7368b545641a370a0dde81ac0d4c5ba41 Mon Sep 17 00:00:00 2001 -From: Daniel Wagner -Date: Tue, 17 Feb 2015 09:37:44 +0100 -Subject: [PATCH 286/328] thermal: Defer thermal wakups to threads - -[ Upstream commit ad2408dc248fe58536eef5b2b5734d8f9d3a280b ] - -On RT the spin lock in pkg_temp_thermal_platfrom_thermal_notify will -call schedule while we run in irq context. - -[] dump_stack+0x4e/0x8f -[] __schedule_bug+0xa6/0xb4 -[] __schedule+0x5b4/0x700 -[] schedule+0x2a/0x90 -[] rt_spin_lock_slowlock+0xe5/0x2d0 -[] rt_spin_lock+0x25/0x30 -[] pkg_temp_thermal_platform_thermal_notify+0x45/0x134 [x86_pkg_temp_thermal] -[] ? therm_throt_process+0x1b/0x160 -[] intel_thermal_interrupt+0x211/0x250 -[] smp_thermal_interrupt+0x21/0x40 -[] thermal_interrupt+0x6d/0x80 - -Let's defer the work to a kthread. - -Signed-off-by: Daniel Wagner -Signed-off-by: Steven Rostedt (VMware) -[bigeasy: reoder init/denit position. TODO: flush swork on exit] -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/thermal/x86_pkg_temp_thermal.c | 28 +++++++++++++++++++++++++- - 1 file changed, 27 insertions(+), 1 deletion(-) - -diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c -index 1ef937d799e4..82f21fd4afb0 100644 ---- a/drivers/thermal/x86_pkg_temp_thermal.c -+++ b/drivers/thermal/x86_pkg_temp_thermal.c -@@ -29,6 +29,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -329,7 +330,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work) - schedule_delayed_work_on(cpu, work, ms); - } - --static int pkg_thermal_notify(u64 msr_val) -+static void pkg_thermal_notify_work(struct kthread_work *work) - { - int cpu = smp_processor_id(); - struct pkg_device *pkgdev; -@@ -348,8 +349,32 @@ static int pkg_thermal_notify(u64 msr_val) - } - - spin_unlock_irqrestore(&pkg_temp_lock, flags); -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+static DEFINE_KTHREAD_WORK(notify_work, pkg_thermal_notify_work); -+ -+static int pkg_thermal_notify(u64 msr_val) -+{ -+ kthread_schedule_work(¬ify_work); -+ return 0; -+} -+ -+static void pkg_thermal_notify_flush(void) -+{ -+ kthread_flush_work(¬ify_work); -+} -+ -+#else /* !CONFIG_PREEMPT_RT_FULL */ -+ -+static void pkg_thermal_notify_flush(void) { } -+ -+static int pkg_thermal_notify(u64 msr_val) -+{ -+ pkg_thermal_notify_work(NULL); - return 0; - } -+#endif /* CONFIG_PREEMPT_RT_FULL */ - - static int pkg_temp_thermal_device_add(unsigned int cpu) - { -@@ -548,6 +573,7 @@ static void __exit pkg_temp_thermal_exit(void) - platform_thermal_package_rate_control = NULL; - - cpuhp_remove_state(pkg_thermal_hp_state); -+ pkg_thermal_notify_flush(); - debugfs_remove_recursive(debugfs); - 
kfree(packages); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0287-revert-block.patch b/kernel/patches-4.19.x-rt/0287-revert-block.patch deleted file mode 100644 index ffc6fe9ed..000000000 --- a/kernel/patches-4.19.x-rt/0287-revert-block.patch +++ /dev/null @@ -1,82 +0,0 @@ -From ffad188bfa63fa513c05850bf64eebe5e519c14d Mon Sep 17 00:00:00 2001 -From: "Steven Rostedt (VMware)" -Date: Fri, 20 Sep 2019 17:50:54 -0400 -Subject: [PATCH 287/328] revert-block - -Revert swork version of: block: blk-mq: move blk_queue_usage_counter_release() into process context - -In order to switch to upstream, we need to revert the swork code. - -Signed-off-by: Steven Rostedt (VMware) ---- - block/blk-core.c | 14 +------------- - include/linux/blkdev.h | 2 -- - 2 files changed, 1 insertion(+), 15 deletions(-) - -diff --git a/block/blk-core.c b/block/blk-core.c -index 13bf37156bb0..4860cd26cd5a 100644 ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -973,21 +973,12 @@ void blk_queue_exit(struct request_queue *q) - percpu_ref_put(&q->q_usage_counter); - } - --static void blk_queue_usage_counter_release_swork(struct swork_event *sev) --{ -- struct request_queue *q = -- container_of(sev, struct request_queue, mq_pcpu_wake); -- -- wake_up_all(&q->mq_freeze_wq); --} -- - static void blk_queue_usage_counter_release(struct percpu_ref *ref) - { - struct request_queue *q = - container_of(ref, struct request_queue, q_usage_counter); - -- if (wq_has_sleeper(&q->mq_freeze_wq)) -- swork_queue(&q->mq_pcpu_wake); -+ wake_up_all(&q->mq_freeze_wq); - } - - static void blk_rq_timed_out_timer(struct timer_list *t) -@@ -1084,7 +1075,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, - queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); - - init_waitqueue_head(&q->mq_freeze_wq); -- INIT_SWORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_swork); - - /* - * Init percpu_ref in atomic mode so that it's faster to shutdown. 
-@@ -3974,8 +3964,6 @@ int __init blk_dev_init(void) - if (!kblockd_workqueue) - panic("Failed to create kblockd\n"); - -- BUG_ON(swork_get()); -- - request_cachep = kmem_cache_create("blkdev_requests", - sizeof(struct request), 0, SLAB_PANIC, NULL); - -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 2f3b2e5196eb..111ab4209797 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -27,7 +27,6 @@ - #include - #include - #include --#include - - struct module; - struct scsi_ioctl_command; -@@ -656,7 +655,6 @@ struct request_queue { - #endif - struct rcu_head rcu_head; - wait_queue_head_t mq_freeze_wq; -- struct swork_event mq_pcpu_wake; - struct percpu_ref q_usage_counter; - struct list_head all_q_node; - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/kernel/patches-4.19.x-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch deleted file mode 100644 index 2ac9055bb..000000000 --- a/kernel/patches-4.19.x-rt/0288-block-blk-mq-move-blk_queue_usage_counter_release-in.patch +++ /dev/null @@ -1,113 +0,0 @@ -From 2793f1c2fe23bf8db5eb818139db6bdc8c0d0ff3 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 13 Mar 2018 13:49:16 +0100 -Subject: [PATCH 288/328] block: blk-mq: move blk_queue_usage_counter_release() - into process context - -[ Upstream commit 61c928ecf4fe200bda9b49a0813b5ba0f43995b5 ] - -| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914 -| in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6 -| 5 locks held by kworker/u257:6/255: -| #0: ("events_unbound"){.+.+.+}, at: [] process_one_work+0x171/0x5e0 -| #1: ((&entry->work)){+.+.+.}, at: [] process_one_work+0x171/0x5e0 -| #2: (&shost->scan_mutex){+.+.+.}, at: [] __scsi_add_device+0xa3/0x130 [scsi_mod] -| #3: (&set->tag_list_lock){+.+...}, at: [] blk_mq_init_queue+0x96a/0xa50 -| #4: (rcu_read_lock_sched){......}, at: [] percpu_ref_kill_and_confirm+0x1d/0x120 -| Preemption disabled at:[] blk_mq_freeze_queue_start+0x56/0x70 -| -| CPU: 2 PID: 255 Comm: kworker/u257:6 Not tainted 3.18.7-rt0+ #1 -| Workqueue: events_unbound async_run_entry_fn -| 0000000000000003 ffff8800bc29f998 ffffffff815b3a12 0000000000000000 -| 0000000000000000 ffff8800bc29f9b8 ffffffff8109aa16 ffff8800bc29fa28 -| ffff8800bc5d1bc8 ffff8800bc29f9e8 ffffffff815b8dd4 ffff880000000000 -| Call Trace: -| [] dump_stack+0x4f/0x7c -| [] __might_sleep+0x116/0x190 -| [] rt_spin_lock+0x24/0x60 -| [] __wake_up+0x29/0x60 -| [] blk_mq_usage_counter_release+0x1e/0x20 -| [] percpu_ref_kill_and_confirm+0x106/0x120 -| [] blk_mq_freeze_queue_start+0x56/0x70 -| [] blk_mq_update_tag_set_depth+0x40/0xd0 -| [] blk_mq_init_queue+0x98c/0xa50 -| [] scsi_mq_alloc_queue+0x20/0x60 [scsi_mod] -| [] scsi_alloc_sdev+0x2f5/0x370 [scsi_mod] -| [] scsi_probe_and_add_lun+0x9e4/0xdd0 [scsi_mod] -| [] __scsi_add_device+0x126/0x130 [scsi_mod] -| [] ata_scsi_scan_host+0xaf/0x200 [libata] -| [] async_port_probe+0x46/0x60 [libata] -| [] async_run_entry_fn+0x3b/0xf0 -| [] process_one_work+0x201/0x5e0 - -percpu_ref_kill_and_confirm() invokes blk_mq_usage_counter_release() in -a rcu-sched region. swait based wake queue can't be used due to -wake_up_all() usage and disabled interrupts in !RT configs (as reported -by Corey Minyard). -The wq_has_sleeper() check has been suggested by Peter Zijlstra. 
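The hunks that follow route the wakeup through a kthread work item; the same deferral can be sketched with a regular workqueue item, which shows the shape of the fix: the percpu_ref release callback only checks for waiters and schedules, and the wake_up_all() itself runs later in process context. freeze_wq, wake_work and usage_counter_release() are illustrative names, not the block layer's.

#include <linux/percpu-refcount.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

static DECLARE_WAIT_QUEUE_HEAD(freeze_wq);

static void wake_work_fn(struct work_struct *work)
{
	wake_up_all(&freeze_wq);		/* may take sleeping locks on RT */
}
static DECLARE_WORK(wake_work, wake_work_fn);

/* Release callback of a percpu_ref; may run in atomic (RCU-sched) context. */
static void usage_counter_release(struct percpu_ref *ref)
{
	if (wq_has_sleeper(&freeze_wq))		/* only defer if a waiter exists */
		schedule_work(&wake_work);
}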
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - block/blk-core.c | 12 +++++++++++- - include/linux/blkdev.h | 2 ++ - 2 files changed, 13 insertions(+), 1 deletion(-) - -diff --git a/block/blk-core.c b/block/blk-core.c -index 4860cd26cd5a..7d709465876e 100644 ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -973,12 +973,21 @@ void blk_queue_exit(struct request_queue *q) - percpu_ref_put(&q->q_usage_counter); - } - -+static void blk_queue_usage_counter_release_wrk(struct kthread_work *work) -+{ -+ struct request_queue *q = -+ container_of(work, struct request_queue, mq_pcpu_wake); -+ -+ wake_up_all(&q->mq_freeze_wq); -+} -+ - static void blk_queue_usage_counter_release(struct percpu_ref *ref) - { - struct request_queue *q = - container_of(ref, struct request_queue, q_usage_counter); - -- wake_up_all(&q->mq_freeze_wq); -+ if (wq_has_sleeper(&q->mq_freeze_wq)) -+ kthread_schedule_work(&q->mq_pcpu_wake); - } - - static void blk_rq_timed_out_timer(struct timer_list *t) -@@ -1075,6 +1084,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, - queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); - - init_waitqueue_head(&q->mq_freeze_wq); -+ kthread_init_work(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk); - - /* - * Init percpu_ref in atomic mode so that it's faster to shutdown. -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 111ab4209797..981103415cd8 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -655,6 +656,7 @@ struct request_queue { - #endif - struct rcu_head rcu_head; - wait_queue_head_t mq_freeze_wq; -+ struct kthread_work mq_pcpu_wake; - struct percpu_ref q_usage_counter; - struct list_head all_q_node; - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0289-workqueue-rework.patch b/kernel/patches-4.19.x-rt/0289-workqueue-rework.patch deleted file mode 100644 index e2b03a7b2..000000000 --- a/kernel/patches-4.19.x-rt/0289-workqueue-rework.patch +++ /dev/null @@ -1,1568 +0,0 @@ -From 30d6766aa0a5077a70137d4bb9b044aa73fe9f7e Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 29 May 2019 18:52:27 +0200 -Subject: [PATCH 289/328] workqueue: rework - -[ Upstream commit d15a862f24df983458533aebd6fa207ecdd1095a ] - -This is an all-in change of the workqueue rework. -The worker_pool.lock is made to raw_spinlock_t. With this change we can -schedule workitems from preempt-disable sections and sections with disabled -interrupts. This change allows to remove all kthread_.* workarounds we used to -have. 
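With pool->lock turned into a raw_spinlock_t, queue_work() and friends become usable from sections that run with preemption or interrupts disabled, which is what the removed kthread_.* helpers had been emulating. A minimal sketch of such a call site, with purely illustrative names (state_lock, deferred_work, update_state_atomic()):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_RAW_SPINLOCK(state_lock);

static void deferred_fn(struct work_struct *work)
{
	/* runs later in a worker thread, fully preemptible */
}
static DECLARE_WORK(deferred_work, deferred_fn);

static void update_state_atomic(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&state_lock, flags);
	/* ... update state that must stay consistent ... */
	schedule_work(&deferred_work);		/* legitimate here after the rework */
	raw_spin_unlock_irqrestore(&state_lock, flags);
}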
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - block/blk-core.c | 6 +- - drivers/block/loop.c | 2 +- - drivers/spi/spi-rockchip.c | 1 - - drivers/thermal/x86_pkg_temp_thermal.c | 28 +-- - fs/aio.c | 10 +- - include/linux/blk-cgroup.h | 2 +- - include/linux/blkdev.h | 2 +- - include/linux/interrupt.h | 5 - - include/linux/kthread-cgroup.h | 17 -- - include/linux/kthread.h | 15 +- - include/linux/swait.h | 14 ++ - include/linux/workqueue.h | 4 - - init/main.c | 1 - - kernel/irq/manage.c | 36 +-- - kernel/kthread.c | 14 -- - kernel/sched/core.c | 1 + - kernel/time/hrtimer.c | 24 -- - kernel/workqueue.c | 304 +++++++++++-------------- - 18 files changed, 166 insertions(+), 320 deletions(-) - delete mode 100644 include/linux/kthread-cgroup.h - -diff --git a/block/blk-core.c b/block/blk-core.c -index 7d709465876e..752c9e754509 100644 ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -973,7 +973,7 @@ void blk_queue_exit(struct request_queue *q) - percpu_ref_put(&q->q_usage_counter); - } - --static void blk_queue_usage_counter_release_wrk(struct kthread_work *work) -+static void blk_queue_usage_counter_release_wrk(struct work_struct *work) - { - struct request_queue *q = - container_of(work, struct request_queue, mq_pcpu_wake); -@@ -987,7 +987,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref) - container_of(ref, struct request_queue, q_usage_counter); - - if (wq_has_sleeper(&q->mq_freeze_wq)) -- kthread_schedule_work(&q->mq_pcpu_wake); -+ schedule_work(&q->mq_pcpu_wake); - } - - static void blk_rq_timed_out_timer(struct timer_list *t) -@@ -1084,7 +1084,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, - queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); - - init_waitqueue_head(&q->mq_freeze_wq); -- kthread_init_work(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk); -+ INIT_WORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk); - - /* - * Init percpu_ref in atomic mode so that it's faster to shutdown. 
-diff --git a/drivers/block/loop.c b/drivers/block/loop.c -index 351ea22ffb56..9cd231a27328 100644 ---- a/drivers/block/loop.c -+++ b/drivers/block/loop.c -@@ -70,7 +70,7 @@ - #include - #include - #include --#include -+#include - #include - #include - #include -diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c -index 63b10236eb05..185bbdce62b1 100644 ---- a/drivers/spi/spi-rockchip.c -+++ b/drivers/spi/spi-rockchip.c -@@ -22,7 +22,6 @@ - #include - #include - #include --#include - - #define DRIVER_NAME "rockchip-spi" - -diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c -index 82f21fd4afb0..1ef937d799e4 100644 ---- a/drivers/thermal/x86_pkg_temp_thermal.c -+++ b/drivers/thermal/x86_pkg_temp_thermal.c -@@ -29,7 +29,6 @@ - #include - #include - #include --#include - #include - #include - -@@ -330,7 +329,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work) - schedule_delayed_work_on(cpu, work, ms); - } - --static void pkg_thermal_notify_work(struct kthread_work *work) -+static int pkg_thermal_notify(u64 msr_val) - { - int cpu = smp_processor_id(); - struct pkg_device *pkgdev; -@@ -349,32 +348,8 @@ static void pkg_thermal_notify_work(struct kthread_work *work) - } - - spin_unlock_irqrestore(&pkg_temp_lock, flags); --} -- --#ifdef CONFIG_PREEMPT_RT_FULL --static DEFINE_KTHREAD_WORK(notify_work, pkg_thermal_notify_work); -- --static int pkg_thermal_notify(u64 msr_val) --{ -- kthread_schedule_work(¬ify_work); -- return 0; --} -- --static void pkg_thermal_notify_flush(void) --{ -- kthread_flush_work(¬ify_work); --} -- --#else /* !CONFIG_PREEMPT_RT_FULL */ -- --static void pkg_thermal_notify_flush(void) { } -- --static int pkg_thermal_notify(u64 msr_val) --{ -- pkg_thermal_notify_work(NULL); - return 0; - } --#endif /* CONFIG_PREEMPT_RT_FULL */ - - static int pkg_temp_thermal_device_add(unsigned int cpu) - { -@@ -573,7 +548,6 @@ static void __exit pkg_temp_thermal_exit(void) - platform_thermal_package_rate_control = NULL; - - cpuhp_remove_state(pkg_thermal_hp_state); -- pkg_thermal_notify_flush(); - debugfs_remove_recursive(debugfs); - kfree(packages); - } -diff --git a/fs/aio.c b/fs/aio.c -index a92119e05869..37e75bb0c406 100644 ---- a/fs/aio.c -+++ b/fs/aio.c -@@ -121,7 +121,7 @@ struct kioctx { - long nr_pages; - - struct rcu_work free_rwork; /* see free_ioctx() */ -- struct kthread_work free_kwork; /* see free_ioctx() */ -+ struct work_struct free_work; /* see free_ioctx() */ - - /* - * signals when all in-flight requests are done -@@ -607,9 +607,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) - * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - - * now it's safe to cancel any that need to be. 
- */ --static void free_ioctx_users_work(struct kthread_work *work) -+static void free_ioctx_users_work(struct work_struct *work) - { -- struct kioctx *ctx = container_of(work, struct kioctx, free_kwork); -+ struct kioctx *ctx = container_of(work, struct kioctx, free_work); - struct aio_kiocb *req; - - spin_lock_irq(&ctx->ctx_lock); -@@ -631,8 +631,8 @@ static void free_ioctx_users(struct percpu_ref *ref) - { - struct kioctx *ctx = container_of(ref, struct kioctx, users); - -- kthread_init_work(&ctx->free_kwork, free_ioctx_users_work); -- kthread_schedule_work(&ctx->free_kwork); -+ INIT_WORK(&ctx->free_work, free_ioctx_users_work); -+ schedule_work(&ctx->free_work); - } - - static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) -diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h -index 0473efda4c65..da587e60fe86 100644 ---- a/include/linux/blk-cgroup.h -+++ b/include/linux/blk-cgroup.h -@@ -14,7 +14,7 @@ - * Nauman Rafique - */ - --#include -+#include - #include - #include - #include -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 981103415cd8..6a0bfa0a2c52 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -656,7 +656,7 @@ struct request_queue { - #endif - struct rcu_head rcu_head; - wait_queue_head_t mq_freeze_wq; -- struct kthread_work mq_pcpu_wake; -+ struct work_struct mq_pcpu_wake; - struct percpu_ref q_usage_counter; - struct list_head all_q_node; - -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index a9321f6429f2..97d9ba26915e 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -13,7 +13,6 @@ - #include - #include - #include --#include - - #include - #include -@@ -239,11 +238,7 @@ extern void resume_device_irqs(void); - struct irq_affinity_notify { - unsigned int irq; - struct kref kref; --#ifdef CONFIG_PREEMPT_RT_BASE -- struct kthread_work work; --#else - struct work_struct work; --#endif - void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); - void (*release)(struct kref *ref); - }; -diff --git a/include/linux/kthread-cgroup.h b/include/linux/kthread-cgroup.h -deleted file mode 100644 -index 53d34bca9d72..000000000000 ---- a/include/linux/kthread-cgroup.h -+++ /dev/null -@@ -1,17 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0 */ --#ifndef _LINUX_KTHREAD_CGROUP_H --#define _LINUX_KTHREAD_CGROUP_H --#include --#include -- --#ifdef CONFIG_BLK_CGROUP --void kthread_associate_blkcg(struct cgroup_subsys_state *css); --struct cgroup_subsys_state *kthread_blkcg(void); --#else --static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { } --static inline struct cgroup_subsys_state *kthread_blkcg(void) --{ -- return NULL; --} --#endif --#endif -diff --git a/include/linux/kthread.h b/include/linux/kthread.h -index 7cf56eb54103..6b8c064f0cbc 100644 ---- a/include/linux/kthread.h -+++ b/include/linux/kthread.h -@@ -4,6 +4,7 @@ - /* Simple interface for creating and stopping kernel threads without mess. 
*/ - #include - #include -+#include - - __printf(4, 5) - struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), -@@ -197,12 +198,14 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); - - void kthread_destroy_worker(struct kthread_worker *worker); - --extern struct kthread_worker kthread_global_worker; --void kthread_init_global_worker(void); -- --static inline bool kthread_schedule_work(struct kthread_work *work) -+#ifdef CONFIG_BLK_CGROUP -+void kthread_associate_blkcg(struct cgroup_subsys_state *css); -+struct cgroup_subsys_state *kthread_blkcg(void); -+#else -+static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { } -+static inline struct cgroup_subsys_state *kthread_blkcg(void) - { -- return kthread_queue_work(&kthread_global_worker, work); -+ return NULL; - } -- -+#endif - #endif /* _LINUX_KTHREAD_H */ -diff --git a/include/linux/swait.h b/include/linux/swait.h -index f426a0661aa0..21ae66cd41d3 100644 ---- a/include/linux/swait.h -+++ b/include/linux/swait.h -@@ -299,4 +299,18 @@ do { \ - __ret; \ - }) - -+#define __swait_event_lock_irq(wq, condition, lock, cmd) \ -+ ___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ -+ raw_spin_unlock_irq(&lock); \ -+ cmd; \ -+ schedule(); \ -+ raw_spin_lock_irq(&lock)) -+ -+#define swait_event_lock_irq(wq_head, condition, lock) \ -+ do { \ -+ if (condition) \ -+ break; \ -+ __swait_event_lock_irq(wq_head, condition, lock, ); \ -+ } while (0) -+ - #endif /* _LINUX_SWAIT_H */ -diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h -index 60d673e15632..546aa73fba6a 100644 ---- a/include/linux/workqueue.h -+++ b/include/linux/workqueue.h -@@ -455,10 +455,6 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, - - extern void destroy_workqueue(struct workqueue_struct *wq); - --struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask); --void free_workqueue_attrs(struct workqueue_attrs *attrs); --int apply_workqueue_attrs(struct workqueue_struct *wq, -- const struct workqueue_attrs *attrs); - int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); - - extern bool queue_work_on(int cpu, struct workqueue_struct *wq, -diff --git a/init/main.c b/init/main.c -index e514dd93de76..6e02188386a7 100644 ---- a/init/main.c -+++ b/init/main.c -@@ -1129,7 +1129,6 @@ static noinline void __init kernel_init_freeable(void) - smp_prepare_cpus(setup_max_cpus); - - workqueue_init(); -- kthread_init_global_worker(); - - init_mm_internals(); - -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 290cd520dba1..82b3728685ca 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -285,12 +285,7 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, - - if (desc->affinity_notify) { - kref_get(&desc->affinity_notify->kref); -- --#ifdef CONFIG_PREEMPT_RT_BASE -- kthread_schedule_work(&desc->affinity_notify->work); --#else - schedule_work(&desc->affinity_notify->work); --#endif - } - irqd_set(data, IRQD_AFFINITY_SET); - -@@ -328,8 +323,10 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) - } - EXPORT_SYMBOL_GPL(irq_set_affinity_hint); - --static void _irq_affinity_notify(struct irq_affinity_notify *notify) -+static void irq_affinity_notify(struct work_struct *work) - { -+ struct irq_affinity_notify *notify = -+ container_of(work, struct irq_affinity_notify, work); - struct irq_desc *desc = irq_to_desc(notify->irq); - cpumask_var_t cpumask; - unsigned long flags; -@@ -351,25 +348,6 @@ static void 
_irq_affinity_notify(struct irq_affinity_notify *notify) - kref_put(¬ify->kref, notify->release); - } - --#ifdef CONFIG_PREEMPT_RT_BASE -- --static void irq_affinity_notify(struct kthread_work *work) --{ -- struct irq_affinity_notify *notify = -- container_of(work, struct irq_affinity_notify, work); -- _irq_affinity_notify(notify); --} -- --#else -- --static void irq_affinity_notify(struct work_struct *work) --{ -- struct irq_affinity_notify *notify = -- container_of(work, struct irq_affinity_notify, work); -- _irq_affinity_notify(notify); --} --#endif -- - /** - * irq_set_affinity_notifier - control notification of IRQ affinity changes - * @irq: Interrupt for which to enable/disable notification -@@ -398,11 +376,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) - if (notify) { - notify->irq = irq; - kref_init(¬ify->kref); --#ifdef CONFIG_PREEMPT_RT_BASE -- kthread_init_work(¬ify->work, irq_affinity_notify); --#else - INIT_WORK(¬ify->work, irq_affinity_notify); --#endif - } - - raw_spin_lock_irqsave(&desc->lock, flags); -@@ -411,11 +385,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) - raw_spin_unlock_irqrestore(&desc->lock, flags); - - if (old_notify) { --#ifdef CONFIG_PREEMPT_RT_BASE -- kthread_cancel_work_sync(¬ify->work); --#else - cancel_work_sync(&old_notify->work); --#endif - kref_put(&old_notify->kref, old_notify->release); - } - -diff --git a/kernel/kthread.c b/kernel/kthread.c -index 9db017761a1f..5641b55783a6 100644 ---- a/kernel/kthread.c -+++ b/kernel/kthread.c -@@ -20,7 +20,6 @@ - #include - #include - #include --#include - #include - - static DEFINE_SPINLOCK(kthread_create_lock); -@@ -1181,19 +1180,6 @@ void kthread_destroy_worker(struct kthread_worker *worker) - } - EXPORT_SYMBOL(kthread_destroy_worker); - --DEFINE_KTHREAD_WORKER(kthread_global_worker); --EXPORT_SYMBOL(kthread_global_worker); -- --__init void kthread_init_global_worker(void) --{ -- kthread_global_worker.task = kthread_create(kthread_worker_fn, -- &kthread_global_worker, -- "kswork"); -- if (WARN_ON(IS_ERR(kthread_global_worker.task))) -- return; -- wake_up_process(kthread_global_worker.task); --} -- - #ifdef CONFIG_BLK_CGROUP - /** - * kthread_associate_blkcg - associate blkcg to current kthread -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 678c2c4de4f5..a1c0123e7636 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -3607,6 +3607,7 @@ static inline void sched_submit_work(struct task_struct *tsk) - { - if (!tsk->state) - return; -+ - /* - * If a worker went to sleep, notify and ask workqueue whether - * it wants to wake up a task to maintain concurrency. 
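For context, the notifier switched back to a plain work item above is consumed through irq_set_affinity_notifier(): a driver fills in the notify/release callbacks and the IRQ core queues notify->work whenever the affinity of the interrupt changes. A small illustrative consumer, with hypothetical names (my_affinity_notify, my_setup()):

#include <linux/interrupt.h>
#include <linux/kref.h>

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	/* runs from a workqueue after the affinity of notify->irq changed */
}

static void my_affinity_release(struct kref *ref)
{
	/* nothing dynamically allocated in this sketch */
}

static struct irq_affinity_notify my_notify = {
	.notify  = my_affinity_notify,
	.release = my_affinity_release,
};

static int my_setup(unsigned int irq)
{
	return irq_set_affinity_notifier(irq, &my_notify);
}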
-diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index ed5d8d51ca91..94d97eae0a46 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -730,29 +730,6 @@ static void hrtimer_switch_to_hres(void) - retrigger_next_event(NULL); - } - --#ifdef CONFIG_PREEMPT_RT_FULL -- --static struct swork_event clock_set_delay_work; -- --static void run_clock_set_delay(struct swork_event *event) --{ -- clock_was_set(); --} -- --void clock_was_set_delayed(void) --{ -- swork_queue(&clock_set_delay_work); --} -- --static __init int create_clock_set_delay_thread(void) --{ -- WARN_ON(swork_get()); -- INIT_SWORK(&clock_set_delay_work, run_clock_set_delay); -- return 0; --} --early_initcall(create_clock_set_delay_thread); --#else /* PREEMPT_RT_FULL */ -- - static void clock_was_set_work(struct work_struct *work) - { - clock_was_set(); -@@ -768,7 +745,6 @@ void clock_was_set_delayed(void) - { - schedule_work(&hrtimer_work); - } --#endif - - #else - -diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 714d0cfe4d56..75f59299dd06 100644 ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -49,8 +49,6 @@ - #include - #include - #include --#include --#include - - #include "workqueue_internal.h" - -@@ -125,11 +123,6 @@ enum { - * cpu or grabbing pool->lock is enough for read access. If - * POOL_DISASSOCIATED is set, it's identical to L. - * -- * On RT we need the extra protection via rt_lock_idle_list() for -- * the list manipulations against read access from -- * wq_worker_sleeping(). All other places are nicely serialized via -- * pool->lock. -- * - * A: wq_pool_attach_mutex protected. - * - * PL: wq_pool_mutex protected. -@@ -151,7 +144,7 @@ enum { - /* struct worker is defined in workqueue_internal.h */ - - struct worker_pool { -- spinlock_t lock; /* the pool lock */ -+ raw_spinlock_t lock; /* the pool lock */ - int cpu; /* I: the associated cpu */ - int node; /* I: the associated node ID */ - int id; /* I: pool ID */ -@@ -304,8 +297,8 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf; - - static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ - static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */ --static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ --static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */ -+static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ -+static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */ - - static LIST_HEAD(workqueues); /* PR: list of all workqueues */ - static bool workqueue_freezing; /* PL: have wqs started freezing? 
*/ -@@ -357,8 +350,6 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq); - struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; - EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); - --static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); -- - static int worker_thread(void *__worker); - static void workqueue_sysfs_unregister(struct workqueue_struct *wq); - -@@ -435,31 +426,6 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); - if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ - else - --#ifdef CONFIG_PREEMPT_RT_BASE --static inline void rt_lock_idle_list(struct worker_pool *pool) --{ -- preempt_disable(); --} --static inline void rt_unlock_idle_list(struct worker_pool *pool) --{ -- preempt_enable(); --} --static inline void sched_lock_idle_list(struct worker_pool *pool) { } --static inline void sched_unlock_idle_list(struct worker_pool *pool) { } --#else --static inline void rt_lock_idle_list(struct worker_pool *pool) { } --static inline void rt_unlock_idle_list(struct worker_pool *pool) { } --static inline void sched_lock_idle_list(struct worker_pool *pool) --{ -- spin_lock_irq(&pool->lock); --} --static inline void sched_unlock_idle_list(struct worker_pool *pool) --{ -- spin_unlock_irq(&pool->lock); --} --#endif -- -- - #ifdef CONFIG_DEBUG_OBJECTS_WORK - - static struct debug_obj_descr work_debug_descr; -@@ -862,20 +828,14 @@ static struct worker *first_idle_worker(struct worker_pool *pool) - * Wake up the first idle worker of @pool. - * - * CONTEXT: -- * spin_lock_irq(pool->lock). -+ * raw_spin_lock_irq(pool->lock). - */ - static void wake_up_worker(struct worker_pool *pool) - { -- struct worker *worker; -- -- rt_lock_idle_list(pool); -- -- worker = first_idle_worker(pool); -+ struct worker *worker = first_idle_worker(pool); - - if (likely(worker)) - wake_up_process(worker->task); -- -- rt_unlock_idle_list(pool); - } - - /** -@@ -904,7 +864,7 @@ void wq_worker_running(struct task_struct *task) - */ - void wq_worker_sleeping(struct task_struct *task) - { -- struct worker *worker = kthread_data(task); -+ struct worker *next, *worker = kthread_data(task); - struct worker_pool *pool; - - /* -@@ -921,18 +881,26 @@ void wq_worker_sleeping(struct task_struct *task) - return; - - worker->sleeping = 1; -+ raw_spin_lock_irq(&pool->lock); - - /* - * The counterpart of the following dec_and_test, implied mb, - * worklist not empty test sequence is in insert_work(). - * Please read comment there. -+ * -+ * NOT_RUNNING is clear. This means that we're bound to and -+ * running on the local cpu w/ rq lock held and preemption -+ * disabled, which in turn means that none else could be -+ * manipulating idle_list, so dereferencing idle_list without pool -+ * lock is safe. - */ - if (atomic_dec_and_test(&pool->nr_running) && - !list_empty(&pool->worklist)) { -- sched_lock_idle_list(pool); -- wake_up_worker(pool); -- sched_unlock_idle_list(pool); -+ next = first_idle_worker(pool); -+ if (next) -+ wake_up_process(next->task); - } -+ raw_spin_unlock_irq(&pool->lock); - } - - /** -@@ -943,7 +911,7 @@ void wq_worker_sleeping(struct task_struct *task) - * Set @flags in @worker->flags and adjust nr_running accordingly. - * - * CONTEXT: -- * spin_lock_irq(pool->lock) -+ * raw_spin_lock_irq(pool->lock) - */ - static inline void worker_set_flags(struct worker *worker, unsigned int flags) - { -@@ -968,7 +936,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags) - * Clear @flags in @worker->flags and adjust nr_running accordingly. 
- * - * CONTEXT: -- * spin_lock_irq(pool->lock) -+ * raw_spin_lock_irq(pool->lock) - */ - static inline void worker_clr_flags(struct worker *worker, unsigned int flags) - { -@@ -1016,7 +984,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) - * actually occurs, it should be easy to locate the culprit work function. - * - * CONTEXT: -- * spin_lock_irq(pool->lock). -+ * raw_spin_lock_irq(pool->lock). - * - * Return: - * Pointer to worker which is executing @work if found, %NULL -@@ -1051,7 +1019,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool, - * nested inside outer list_for_each_entry_safe(). - * - * CONTEXT: -- * spin_lock_irq(pool->lock). -+ * raw_spin_lock_irq(pool->lock). - */ - static void move_linked_works(struct work_struct *work, struct list_head *head, - struct work_struct **nextp) -@@ -1129,11 +1097,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) - * As both pwqs and pools are RCU protected, the - * following lock operations are safe. - */ -- rcu_read_lock(); -- local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); -+ raw_spin_lock_irq(&pwq->pool->lock); - put_pwq(pwq); -- local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); -- rcu_read_unlock(); -+ raw_spin_unlock_irq(&pwq->pool->lock); - } - } - -@@ -1166,7 +1132,7 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq) - * decrement nr_in_flight of its pwq and handle workqueue flushing. - * - * CONTEXT: -- * spin_lock_irq(pool->lock). -+ * raw_spin_lock_irq(pool->lock). - */ - static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) - { -@@ -1237,7 +1203,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - struct worker_pool *pool; - struct pool_workqueue *pwq; - -- local_lock_irqsave(pendingb_lock, *flags); -+ local_irq_save(*flags); - - /* try to steal the timer if it exists */ - if (is_dwork) { -@@ -1265,7 +1231,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - if (!pool) - goto fail; - -- spin_lock(&pool->lock); -+ raw_spin_lock(&pool->lock); - /* - * work->data is guaranteed to point to pwq only while the work - * item is queued on pwq->wq, and both updating work->data to point -@@ -1294,17 +1260,17 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - /* work->data points to pwq iff queued, point to pool */ - set_work_pool_and_keep_pending(work, pool->id); - -- spin_unlock(&pool->lock); -+ raw_spin_unlock(&pool->lock); - rcu_read_unlock(); - return 1; - } -- spin_unlock(&pool->lock); -+ raw_spin_unlock(&pool->lock); - fail: - rcu_read_unlock(); -- local_unlock_irqrestore(pendingb_lock, *flags); -+ local_irq_restore(*flags); - if (work_is_canceling(work)) - return -ENOENT; -- cpu_chill(); -+ cpu_relax(); - return -EAGAIN; - } - -@@ -1319,7 +1285,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - * work_struct flags. - * - * CONTEXT: -- * spin_lock_irq(pool->lock). -+ * raw_spin_lock_irq(pool->lock). - */ - static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, - struct list_head *head, unsigned int extra_flags) -@@ -1406,13 +1372,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, - * queued or lose PENDING. Grabbing PENDING and queueing should - * happen with IRQ disabled. - */ --#ifndef CONFIG_PREEMPT_RT_FULL -- /* -- * nort: On RT the "interrupts-disabled" rule has been replaced with -- * pendingb_lock. 
-- */ - lockdep_assert_irqs_disabled(); --#endif - - debug_work_activate(work); - -@@ -1440,7 +1400,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, - if (last_pool && last_pool != pwq->pool) { - struct worker *worker; - -- spin_lock(&last_pool->lock); -+ raw_spin_lock(&last_pool->lock); - - worker = find_worker_executing_work(last_pool, work); - -@@ -1448,11 +1408,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, - pwq = worker->current_pwq; - } else { - /* meh... not running there, queue here */ -- spin_unlock(&last_pool->lock); -- spin_lock(&pwq->pool->lock); -+ raw_spin_unlock(&last_pool->lock); -+ raw_spin_lock(&pwq->pool->lock); - } - } else { -- spin_lock(&pwq->pool->lock); -+ raw_spin_lock(&pwq->pool->lock); - } - - /* -@@ -1465,7 +1425,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, - */ - if (unlikely(!pwq->refcnt)) { - if (wq->flags & WQ_UNBOUND) { -- spin_unlock(&pwq->pool->lock); -+ raw_spin_unlock(&pwq->pool->lock); - cpu_relax(); - goto retry; - } -@@ -1497,7 +1457,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, - insert_work(pwq, work, worklist, work_flags); - - out: -- spin_unlock(&pwq->pool->lock); -+ raw_spin_unlock(&pwq->pool->lock); - rcu_read_unlock(); - } - -@@ -1518,14 +1478,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, - bool ret = false; - unsigned long flags; - -- local_lock_irqsave(pendingb_lock,flags); -+ local_irq_save(flags); - - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { - __queue_work(cpu, wq, work); - ret = true; - } - -- local_unlock_irqrestore(pendingb_lock, flags); -+ local_irq_restore(flags); - return ret; - } - EXPORT_SYMBOL(queue_work_on); -@@ -1533,12 +1493,11 @@ EXPORT_SYMBOL(queue_work_on); - void delayed_work_timer_fn(struct timer_list *t) - { - struct delayed_work *dwork = from_timer(dwork, t, timer); -+ unsigned long flags; - -- /* XXX */ -- /* local_lock(pendingb_lock); */ -- /* should have been called from irqsafe timer with irq already off */ -+ local_irq_save(flags); - __queue_work(dwork->cpu, dwork->wq, &dwork->work); -- /* local_unlock(pendingb_lock); */ -+ local_irq_restore(flags); - } - EXPORT_SYMBOL(delayed_work_timer_fn); - -@@ -1593,14 +1552,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, - unsigned long flags; - - /* read the comment in __queue_work() */ -- local_lock_irqsave(pendingb_lock, flags); -+ local_irq_save(flags); - - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { - __queue_delayed_work(cpu, wq, dwork, delay); - ret = true; - } - -- local_unlock_irqrestore(pendingb_lock, flags); -+ local_irq_restore(flags); - return ret; - } - EXPORT_SYMBOL(queue_delayed_work_on); -@@ -1635,7 +1594,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, - - if (likely(ret >= 0)) { - __queue_delayed_work(cpu, wq, dwork, delay); -- local_unlock_irqrestore(pendingb_lock, flags); -+ local_irq_restore(flags); - } - - /* -ENOENT from try_to_grab_pending() becomes %true */ -@@ -1646,12 +1605,11 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on); - static void rcu_work_rcufn(struct rcu_head *rcu) - { - struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); -- unsigned long flags; - - /* read the comment in __queue_work() */ -- local_lock_irqsave(pendingb_lock, flags); -+ local_irq_disable(); - __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); -- local_unlock_irqrestore(pendingb_lock, flags); -+ local_irq_enable(); - } - - /** -@@ -1686,7 +1644,7 @@ 
EXPORT_SYMBOL(queue_rcu_work); - * necessary. - * - * LOCKING: -- * spin_lock_irq(pool->lock). -+ * raw_spin_lock_irq(pool->lock). - */ - static void worker_enter_idle(struct worker *worker) - { -@@ -1703,9 +1661,7 @@ static void worker_enter_idle(struct worker *worker) - worker->last_active = jiffies; - - /* idle_list is LIFO */ -- rt_lock_idle_list(pool); - list_add(&worker->entry, &pool->idle_list); -- rt_unlock_idle_list(pool); - - if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) - mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); -@@ -1728,7 +1684,7 @@ static void worker_enter_idle(struct worker *worker) - * @worker is leaving idle state. Update stats. - * - * LOCKING: -- * spin_lock_irq(pool->lock). -+ * raw_spin_lock_irq(pool->lock). - */ - static void worker_leave_idle(struct worker *worker) - { -@@ -1738,9 +1694,7 @@ static void worker_leave_idle(struct worker *worker) - return; - worker_clr_flags(worker, WORKER_IDLE); - pool->nr_idle--; -- rt_lock_idle_list(pool); - list_del_init(&worker->entry); -- rt_unlock_idle_list(pool); - } - - static struct worker *alloc_worker(int node) -@@ -1868,11 +1822,11 @@ static struct worker *create_worker(struct worker_pool *pool) - worker_attach_to_pool(worker, pool); - - /* start the newly created worker */ -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - worker->pool->nr_workers++; - worker_enter_idle(worker); - wake_up_process(worker->task); -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - - return worker; - -@@ -1891,7 +1845,7 @@ static struct worker *create_worker(struct worker_pool *pool) - * be idle. - * - * CONTEXT: -- * spin_lock_irq(pool->lock). -+ * raw_spin_lock_irq(pool->lock). - */ - static void destroy_worker(struct worker *worker) - { -@@ -1908,9 +1862,7 @@ static void destroy_worker(struct worker *worker) - pool->nr_workers--; - pool->nr_idle--; - -- rt_lock_idle_list(pool); - list_del_init(&worker->entry); -- rt_unlock_idle_list(pool); - worker->flags |= WORKER_DIE; - wake_up_process(worker->task); - } -@@ -1919,7 +1871,7 @@ static void idle_worker_timeout(struct timer_list *t) - { - struct worker_pool *pool = from_timer(pool, t, idle_timer); - -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - - while (too_many_workers(pool)) { - struct worker *worker; -@@ -1937,7 +1889,7 @@ static void idle_worker_timeout(struct timer_list *t) - destroy_worker(worker); - } - -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - } - - static void send_mayday(struct work_struct *work) -@@ -1968,8 +1920,8 @@ static void pool_mayday_timeout(struct timer_list *t) - struct worker_pool *pool = from_timer(pool, t, mayday_timer); - struct work_struct *work; - -- spin_lock_irq(&pool->lock); -- spin_lock(&wq_mayday_lock); /* for wq->maydays */ -+ raw_spin_lock_irq(&pool->lock); -+ raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */ - - if (need_to_create_worker(pool)) { - /* -@@ -1982,8 +1934,8 @@ static void pool_mayday_timeout(struct timer_list *t) - send_mayday(work); - } - -- spin_unlock(&wq_mayday_lock); -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock(&wq_mayday_lock); -+ raw_spin_unlock_irq(&pool->lock); - - mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); - } -@@ -2002,7 +1954,7 @@ static void pool_mayday_timeout(struct timer_list *t) - * may_start_working() %true. 
- * - * LOCKING: -- * spin_lock_irq(pool->lock) which may be released and regrabbed -+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed - * multiple times. Does GFP_KERNEL allocations. Called only from - * manager. - */ -@@ -2011,7 +1963,7 @@ __releases(&pool->lock) - __acquires(&pool->lock) - { - restart: -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - - /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ - mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); -@@ -2027,7 +1979,7 @@ __acquires(&pool->lock) - } - - del_timer_sync(&pool->mayday_timer); -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - /* - * This is necessary even after a new worker was just successfully - * created as @pool->lock was dropped and the new worker might have -@@ -2050,7 +2002,7 @@ __acquires(&pool->lock) - * and may_start_working() is true. - * - * CONTEXT: -- * spin_lock_irq(pool->lock) which may be released and regrabbed -+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed - * multiple times. Does GFP_KERNEL allocations. - * - * Return: -@@ -2073,7 +2025,7 @@ static bool manage_workers(struct worker *worker) - - pool->manager = NULL; - pool->flags &= ~POOL_MANAGER_ACTIVE; -- wake_up(&wq_manager_wait); -+ swake_up_one(&wq_manager_wait); - return true; - } - -@@ -2089,7 +2041,7 @@ static bool manage_workers(struct worker *worker) - * call this function to process a work. - * - * CONTEXT: -- * spin_lock_irq(pool->lock) which is released and regrabbed. -+ * raw_spin_lock_irq(pool->lock) which is released and regrabbed. - */ - static void process_one_work(struct worker *worker, struct work_struct *work) - __releases(&pool->lock) -@@ -2171,7 +2123,7 @@ __acquires(&pool->lock) - */ - set_work_pool_and_clear_pending(work, pool->id); - -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - - lock_map_acquire(&pwq->wq->lockdep_map); - lock_map_acquire(&lockdep_map); -@@ -2226,7 +2178,7 @@ __acquires(&pool->lock) - */ - cond_resched(); - -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - - /* clear cpu intensive status */ - if (unlikely(cpu_intensive)) -@@ -2249,7 +2201,7 @@ __acquires(&pool->lock) - * fetches a work from the top and executes it. - * - * CONTEXT: -- * spin_lock_irq(pool->lock) which may be released and regrabbed -+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed - * multiple times. - */ - static void process_scheduled_works(struct worker *worker) -@@ -2291,11 +2243,11 @@ static int worker_thread(void *__worker) - /* tell the scheduler that this is a workqueue worker */ - set_pf_worker(true); - woke_up: -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - - /* am I supposed to die? 
*/ - if (unlikely(worker->flags & WORKER_DIE)) { -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - WARN_ON_ONCE(!list_empty(&worker->entry)); - set_pf_worker(false); - -@@ -2361,7 +2313,7 @@ static int worker_thread(void *__worker) - */ - worker_enter_idle(worker); - __set_current_state(TASK_IDLE); -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - schedule(); - goto woke_up; - } -@@ -2415,7 +2367,7 @@ static int rescuer_thread(void *__rescuer) - should_stop = kthread_should_stop(); - - /* see whether any pwq is asking for help */ -- spin_lock_irq(&wq_mayday_lock); -+ raw_spin_lock_irq(&wq_mayday_lock); - - while (!list_empty(&wq->maydays)) { - struct pool_workqueue *pwq = list_first_entry(&wq->maydays, -@@ -2427,11 +2379,11 @@ static int rescuer_thread(void *__rescuer) - __set_current_state(TASK_RUNNING); - list_del_init(&pwq->mayday_node); - -- spin_unlock_irq(&wq_mayday_lock); -+ raw_spin_unlock_irq(&wq_mayday_lock); - - worker_attach_to_pool(rescuer, pool); - -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - - /* - * Slurp in all works issued via this workqueue and -@@ -2460,7 +2412,7 @@ static int rescuer_thread(void *__rescuer) - * incur MAYDAY_INTERVAL delay inbetween. - */ - if (need_to_create_worker(pool)) { -- spin_lock(&wq_mayday_lock); -+ raw_spin_lock(&wq_mayday_lock); - /* - * Queue iff we aren't racing destruction - * and somebody else hasn't queued it already. -@@ -2469,7 +2421,7 @@ static int rescuer_thread(void *__rescuer) - get_pwq(pwq); - list_add_tail(&pwq->mayday_node, &wq->maydays); - } -- spin_unlock(&wq_mayday_lock); -+ raw_spin_unlock(&wq_mayday_lock); - } - } - -@@ -2487,14 +2439,14 @@ static int rescuer_thread(void *__rescuer) - if (need_more_worker(pool)) - wake_up_worker(pool); - -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - - worker_detach_from_pool(rescuer); - -- spin_lock_irq(&wq_mayday_lock); -+ raw_spin_lock_irq(&wq_mayday_lock); - } - -- spin_unlock_irq(&wq_mayday_lock); -+ raw_spin_unlock_irq(&wq_mayday_lock); - - if (should_stop) { - __set_current_state(TASK_RUNNING); -@@ -2574,7 +2526,7 @@ static void wq_barrier_func(struct work_struct *work) - * underneath us, so we can't reliably determine pwq from @target. - * - * CONTEXT: -- * spin_lock_irq(pool->lock). -+ * raw_spin_lock_irq(pool->lock). 
- */ - static void insert_wq_barrier(struct pool_workqueue *pwq, - struct wq_barrier *barr, -@@ -2661,7 +2613,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, - for_each_pwq(pwq, wq) { - struct worker_pool *pool = pwq->pool; - -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - - if (flush_color >= 0) { - WARN_ON_ONCE(pwq->flush_color != -1); -@@ -2678,7 +2630,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, - pwq->work_color = work_color; - } - -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - } - - if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) -@@ -2878,9 +2830,9 @@ void drain_workqueue(struct workqueue_struct *wq) - for_each_pwq(pwq, wq) { - bool drained; - -- spin_lock_irq(&pwq->pool->lock); -+ raw_spin_lock_irq(&pwq->pool->lock); - drained = !pwq->nr_active && list_empty(&pwq->delayed_works); -- spin_unlock_irq(&pwq->pool->lock); -+ raw_spin_unlock_irq(&pwq->pool->lock); - - if (drained) - continue; -@@ -2916,7 +2868,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, - return false; - } - -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - /* see the comment in try_to_grab_pending() with the same code */ - pwq = get_work_pwq(work); - if (pwq) { -@@ -2932,7 +2884,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, - check_flush_dependency(pwq->wq, work); - - insert_wq_barrier(pwq, barr, work, worker); -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - - /* - * Force a lock recursion deadlock when using flush_work() inside a -@@ -2951,7 +2903,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, - rcu_read_unlock(); - return true; - already_gone: -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - rcu_read_unlock(); - return false; - } -@@ -3052,7 +3004,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) - - /* tell other tasks trying to grab @work to back off */ - mark_work_canceling(work); -- local_unlock_irqrestore(pendingb_lock, flags); -+ local_irq_restore(flags); - - /* - * This allows canceling during early boot. We know that @work -@@ -3113,10 +3065,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); - */ - bool flush_delayed_work(struct delayed_work *dwork) - { -- local_lock_irq(pendingb_lock); -+ local_irq_disable(); - if (del_timer_sync(&dwork->timer)) - __queue_work(dwork->cpu, dwork->wq, &dwork->work); -- local_unlock_irq(pendingb_lock); -+ local_irq_enable(); - return flush_work(&dwork->work); - } - EXPORT_SYMBOL(flush_delayed_work); -@@ -3154,7 +3106,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork) - return false; - - set_work_pool_and_clear_pending(work, get_work_pool_id(work)); -- local_unlock_irqrestore(pendingb_lock, flags); -+ local_irq_restore(flags); - return ret; - } - -@@ -3264,7 +3216,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context); - * - * Undo alloc_workqueue_attrs(). - */ --void free_workqueue_attrs(struct workqueue_attrs *attrs) -+static void free_workqueue_attrs(struct workqueue_attrs *attrs) - { - if (attrs) { - free_cpumask_var(attrs->cpumask); -@@ -3274,21 +3226,20 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs) - - /** - * alloc_workqueue_attrs - allocate a workqueue_attrs -- * @gfp_mask: allocation mask to use - * - * Allocate a new workqueue_attrs, initialize with default settings and - * return it. 
- * - * Return: The allocated new workqueue_attr on success. %NULL on failure. - */ --struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) -+static struct workqueue_attrs *alloc_workqueue_attrs(void) - { - struct workqueue_attrs *attrs; - -- attrs = kzalloc(sizeof(*attrs), gfp_mask); -+ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); - if (!attrs) - goto fail; -- if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) -+ if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) - goto fail; - - cpumask_copy(attrs->cpumask, cpu_possible_mask); -@@ -3345,7 +3296,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a, - */ - static int init_worker_pool(struct worker_pool *pool) - { -- spin_lock_init(&pool->lock); -+ raw_spin_lock_init(&pool->lock); - pool->id = -1; - pool->cpu = -1; - pool->node = NUMA_NO_NODE; -@@ -3366,7 +3317,7 @@ static int init_worker_pool(struct worker_pool *pool) - pool->refcnt = 1; - - /* shouldn't fail above this point */ -- pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); -+ pool->attrs = alloc_workqueue_attrs(); - if (!pool->attrs) - return -ENOMEM; - return 0; -@@ -3431,15 +3382,15 @@ static void put_unbound_pool(struct worker_pool *pool) - * @pool's workers from blocking on attach_mutex. We're the last - * manager and @pool gets freed with the flag set. - */ -- spin_lock_irq(&pool->lock); -- wait_event_lock_irq(wq_manager_wait, -+ raw_spin_lock_irq(&pool->lock); -+ swait_event_lock_irq(wq_manager_wait, - !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock); - pool->flags |= POOL_MANAGER_ACTIVE; - - while ((worker = first_idle_worker(pool))) - destroy_worker(worker); - WARN_ON(pool->nr_workers || pool->nr_idle); -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - - mutex_lock(&wq_pool_attach_mutex); - if (!list_empty(&pool->workers)) -@@ -3593,7 +3544,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) - return; - - /* this function can be called during early boot w/ irq disabled */ -- spin_lock_irqsave(&pwq->pool->lock, flags); -+ raw_spin_lock_irqsave(&pwq->pool->lock, flags); - - /* - * During [un]freezing, the caller is responsible for ensuring that -@@ -3616,7 +3567,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) - pwq->max_active = 0; - } - -- spin_unlock_irqrestore(&pwq->pool->lock, flags); -+ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); - } - - /* initialize newly alloced @pwq which is associated with @wq and @pool */ -@@ -3789,8 +3740,8 @@ apply_wqattrs_prepare(struct workqueue_struct *wq, - - ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL); - -- new_attrs = alloc_workqueue_attrs(GFP_KERNEL); -- tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL); -+ new_attrs = alloc_workqueue_attrs(); -+ tmp_attrs = alloc_workqueue_attrs(); - if (!ctx || !new_attrs || !tmp_attrs) - goto out_free; - -@@ -3926,7 +3877,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, - * - * Return: 0 on success and -errno on failure. 
- */ --int apply_workqueue_attrs(struct workqueue_struct *wq, -+static int apply_workqueue_attrs(struct workqueue_struct *wq, - const struct workqueue_attrs *attrs) - { - int ret; -@@ -3937,7 +3888,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, - - return ret; - } --EXPORT_SYMBOL_GPL(apply_workqueue_attrs); - - /** - * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug -@@ -4015,9 +3965,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, - - use_dfl_pwq: - mutex_lock(&wq->mutex); -- spin_lock_irq(&wq->dfl_pwq->pool->lock); -+ raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); - get_pwq(wq->dfl_pwq); -- spin_unlock_irq(&wq->dfl_pwq->pool->lock); -+ raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); - old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); - out_unlock: - mutex_unlock(&wq->mutex); -@@ -4136,7 +4086,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, - return NULL; - - if (flags & WQ_UNBOUND) { -- wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); -+ wq->unbound_attrs = alloc_workqueue_attrs(); - if (!wq->unbound_attrs) - goto err_free_wq; - } -@@ -4223,9 +4173,9 @@ void destroy_workqueue(struct workqueue_struct *wq) - struct worker *rescuer = wq->rescuer; - - /* this prevents new queueing */ -- spin_lock_irq(&wq_mayday_lock); -+ raw_spin_lock_irq(&wq_mayday_lock); - wq->rescuer = NULL; -- spin_unlock_irq(&wq_mayday_lock); -+ raw_spin_unlock_irq(&wq_mayday_lock); - - /* rescuer will empty maydays list before exiting */ - kthread_stop(rescuer->task); -@@ -4420,10 +4370,10 @@ unsigned int work_busy(struct work_struct *work) - rcu_read_lock(); - pool = get_work_pool(work); - if (pool) { -- spin_lock_irqsave(&pool->lock, flags); -+ raw_spin_lock_irqsave(&pool->lock, flags); - if (find_worker_executing_work(pool, work)) - ret |= WORK_BUSY_RUNNING; -- spin_unlock_irqrestore(&pool->lock, flags); -+ raw_spin_unlock_irqrestore(&pool->lock, flags); - } - rcu_read_unlock(); - -@@ -4630,10 +4580,10 @@ void show_workqueue_state(void) - pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); - - for_each_pwq(pwq, wq) { -- spin_lock_irqsave(&pwq->pool->lock, flags); -+ raw_spin_lock_irqsave(&pwq->pool->lock, flags); - if (pwq->nr_active || !list_empty(&pwq->delayed_works)) - show_pwq(pwq); -- spin_unlock_irqrestore(&pwq->pool->lock, flags); -+ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); - /* - * We could be printing a lot from atomic context, e.g. - * sysrq-t -> show_workqueue_state(). Avoid triggering -@@ -4647,7 +4597,7 @@ void show_workqueue_state(void) - struct worker *worker; - bool first = true; - -- spin_lock_irqsave(&pool->lock, flags); -+ raw_spin_lock_irqsave(&pool->lock, flags); - if (pool->nr_workers == pool->nr_idle) - goto next_pool; - -@@ -4666,7 +4616,7 @@ void show_workqueue_state(void) - } - pr_cont("\n"); - next_pool: -- spin_unlock_irqrestore(&pool->lock, flags); -+ raw_spin_unlock_irqrestore(&pool->lock, flags); - /* - * We could be printing a lot from atomic context, e.g. - * sysrq-t -> show_workqueue_state(). Avoid triggering -@@ -4696,7 +4646,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task) - struct worker_pool *pool = worker->pool; - - if (pool) { -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - /* - * ->desc tracks information (wq name or - * set_worker_desc()) for the latest execution. 
If -@@ -4710,7 +4660,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task) - scnprintf(buf + off, size - off, "-%s", - worker->desc); - } -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - } - } - -@@ -4741,7 +4691,7 @@ static void unbind_workers(int cpu) - - for_each_cpu_worker_pool(pool, cpu) { - mutex_lock(&wq_pool_attach_mutex); -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - - /* - * We've blocked all attach/detach operations. Make all workers -@@ -4755,7 +4705,7 @@ static void unbind_workers(int cpu) - - pool->flags |= POOL_DISASSOCIATED; - -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - mutex_unlock(&wq_pool_attach_mutex); - - /* -@@ -4781,9 +4731,9 @@ static void unbind_workers(int cpu) - * worker blocking could lead to lengthy stalls. Kick off - * unbound chain execution of currently pending work items. - */ -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - wake_up_worker(pool); -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - } - } - -@@ -4810,7 +4760,7 @@ static void rebind_workers(struct worker_pool *pool) - WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, - pool->attrs->cpumask) < 0); - -- spin_lock_irq(&pool->lock); -+ raw_spin_lock_irq(&pool->lock); - - pool->flags &= ~POOL_DISASSOCIATED; - -@@ -4849,7 +4799,7 @@ static void rebind_workers(struct worker_pool *pool) - WRITE_ONCE(worker->flags, worker_flags); - } - -- spin_unlock_irq(&pool->lock); -+ raw_spin_unlock_irq(&pool->lock); - } - - /** -@@ -5301,7 +5251,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) - - lockdep_assert_held(&wq_pool_mutex); - -- attrs = alloc_workqueue_attrs(GFP_KERNEL); -+ attrs = alloc_workqueue_attrs(); - if (!attrs) - return NULL; - -@@ -5723,7 +5673,7 @@ static void __init wq_numa_init(void) - return; - } - -- wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL); -+ wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(); - BUG_ON(!wq_update_unbound_numa_attrs_buf); - - /* -@@ -5798,7 +5748,7 @@ int __init workqueue_init_early(void) - for (i = 0; i < NR_STD_WORKER_POOLS; i++) { - struct workqueue_attrs *attrs; - -- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); -+ BUG_ON(!(attrs = alloc_workqueue_attrs())); - attrs->nice = std_nice[i]; - unbound_std_wq_attrs[i] = attrs; - -@@ -5807,7 +5757,7 @@ int __init workqueue_init_early(void) - * guaranteed by max_active which is enforced by pwqs. - * Turn off NUMA so that dfl_pwq is used for all nodes. - */ -- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); -+ BUG_ON(!(attrs = alloc_workqueue_attrs())); - attrs->nice = std_nice[i]; - attrs->no_numa = true; - ordered_wq_attrs[i] = attrs; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch b/kernel/patches-4.19.x-rt/0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch deleted file mode 100644 index af6af06ca..000000000 --- a/kernel/patches-4.19.x-rt/0290-i2c-exynos5-Remove-IRQF_ONESHOT.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 7c3f8afe7bf7bfc55e4dd00b2cdc0471d7fa56b4 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 13 Aug 2019 12:30:12 +0200 -Subject: [PATCH 290/328] i2c: exynos5: Remove IRQF_ONESHOT - -[ Upstream commit 4b217df0ab3f7910c96e42091cc7d9f221d05f01 ] - -The drivers sets IRQF_ONESHOT and passes only a primary handler. The IRQ -is masked while the primary is handler is invoked independently of -IRQF_ONESHOT. 
-With IRQF_ONESHOT the core code will not force-thread the interrupt and -this is probably not intended. I *assume* that the original author copied -the IRQ registration from another driver which passed a primary and -secondary handler and removed the secondary handler but keeping the -ONESHOT flag. - -Remove IRQF_ONESHOT. - -Reported-by: Benjamin Rouxel -Tested-by: Benjamin Rouxel -Cc: Kukjin Kim -Cc: Krzysztof Kozlowski -Cc: linux-samsung-soc@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - drivers/i2c/busses/i2c-exynos5.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - -diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c -index c1ce2299a76e..5c57ecf4b79e 100644 ---- a/drivers/i2c/busses/i2c-exynos5.c -+++ b/drivers/i2c/busses/i2c-exynos5.c -@@ -800,9 +800,7 @@ static int exynos5_i2c_probe(struct platform_device *pdev) - } - - ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq, -- IRQF_NO_SUSPEND | IRQF_ONESHOT, -- dev_name(&pdev->dev), i2c); -- -+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c); - if (ret != 0) { - dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq); - goto err_clk; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch b/kernel/patches-4.19.x-rt/0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch deleted file mode 100644 index 0874dda06..000000000 --- a/kernel/patches-4.19.x-rt/0291-i2c-hix5hd2-Remove-IRQF_ONESHOT.patch +++ /dev/null @@ -1,41 +0,0 @@ -From d1b1f2e9ff0c025196d564544a0b808fdb73cc16 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 13 Aug 2019 12:30:37 +0200 -Subject: [PATCH 291/328] i2c: hix5hd2: Remove IRQF_ONESHOT - -[ Upstream commit e88b481f3f86f11e3243e0808a830e5ca5782a9d ] - -The drivers sets IRQF_ONESHOT and passes only a primary handler. The IRQ -is masked while the primary is handler is invoked independently of -IRQF_ONESHOT. -With IRQF_ONESHOT the core code will not force-thread the interrupt and -this is probably not intended. I *assume* that the original author copied -the IRQ registration from another driver which passed a primary and -secondary handler and removed the secondary handler but keeping the -ONESHOT flag. - -Remove IRQF_ONESHOT. 
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - drivers/i2c/busses/i2c-hix5hd2.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c -index 061a4bfb03f4..575aff50b19a 100644 ---- a/drivers/i2c/busses/i2c-hix5hd2.c -+++ b/drivers/i2c/busses/i2c-hix5hd2.c -@@ -449,8 +449,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev) - hix5hd2_i2c_init(priv); - - ret = devm_request_irq(&pdev->dev, irq, hix5hd2_i2c_irq, -- IRQF_NO_SUSPEND | IRQF_ONESHOT, -- dev_name(&pdev->dev), priv); -+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), priv); - if (ret != 0) { - dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", irq); - goto err_clk; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0292-sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch b/kernel/patches-4.19.x-rt/0292-sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch deleted file mode 100644 index b4d0f07b6..000000000 --- a/kernel/patches-4.19.x-rt/0292-sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch +++ /dev/null @@ -1,49 +0,0 @@ -From adb807c2fbd6d2f64515708367de2cae2b106925 Mon Sep 17 00:00:00 2001 -From: Juri Lelli -Date: Wed, 31 Jul 2019 12:37:15 +0200 -Subject: [PATCH 292/328] sched/deadline: Ensure inactive_timer runs in hardirq - context - -[ Upstream commit ba94e7aed7405c58251b1380e6e7d73aa8284b41 ] - -SCHED_DEADLINE inactive timer needs to run in hardirq context (as -dl_task_timer already does) on PREEMPT_RT - -Change the mode to HRTIMER_MODE_REL_HARD. - -[ tglx: Fixed up the start site, so mode debugging works ] - -Signed-off-by: Juri Lelli -Signed-off-by: Thomas Gleixner -Link: https://lkml.kernel.org/r/20190731103715.4047-1-juri.lelli@redhat.com -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/sched/deadline.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index 974a8f9b615a..929167a1d991 100644 ---- a/kernel/sched/deadline.c -+++ b/kernel/sched/deadline.c -@@ -287,7 +287,7 @@ static void task_non_contending(struct task_struct *p) - - dl_se->dl_non_contending = 1; - get_task_struct(p); -- hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL); -+ hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD); - } - - static void task_contending(struct sched_dl_entity *dl_se, int flags) -@@ -1325,7 +1325,7 @@ void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se) - { - struct hrtimer *timer = &dl_se->inactive_timer; - -- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); - timer->function = inactive_task_timer; - } - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0293-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch b/kernel/patches-4.19.x-rt/0293-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch deleted file mode 100644 index 8c27d1548..000000000 --- a/kernel/patches-4.19.x-rt/0293-thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch +++ /dev/null @@ -1,119 +0,0 @@ -From 40e46d2cf474897d209be74b3c6471dbdf4f2aea Mon Sep 17 00:00:00 2001 -From: Clark Williams -Date: Mon, 15 Jul 2019 15:25:00 -0500 -Subject: [PATCH 293/328] thermal/x86_pkg_temp: make pkg_temp_lock a raw - spinlock - -[ Upstream commit 8b03bb3ea7861b70b506199a69b1c8f81fe2d4d0 ] - -The spinlock pkg_temp_lock has the potential of being taken in atomic -context on 
v5.2-rt PREEMPT_RT. It's static and limited scope so -go ahead and make it a raw spinlock. - -Signed-off-by: Clark Williams -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - drivers/thermal/x86_pkg_temp_thermal.c | 24 ++++++++++++------------ - 1 file changed, 12 insertions(+), 12 deletions(-) - -diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c -index 1ef937d799e4..540becb78a0f 100644 ---- a/drivers/thermal/x86_pkg_temp_thermal.c -+++ b/drivers/thermal/x86_pkg_temp_thermal.c -@@ -75,7 +75,7 @@ static int max_packages __read_mostly; - /* Array of package pointers */ - static struct pkg_device **packages; - /* Serializes interrupt notification, work and hotplug */ --static DEFINE_SPINLOCK(pkg_temp_lock); -+static DEFINE_RAW_SPINLOCK(pkg_temp_lock); - /* Protects zone operation in the work function against hotplug removal */ - static DEFINE_MUTEX(thermal_zone_mutex); - -@@ -291,12 +291,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) - u64 msr_val, wr_val; - - mutex_lock(&thermal_zone_mutex); -- spin_lock_irq(&pkg_temp_lock); -+ raw_spin_lock_irq(&pkg_temp_lock); - ++pkg_work_cnt; - - pkgdev = pkg_temp_thermal_get_dev(cpu); - if (!pkgdev) { -- spin_unlock_irq(&pkg_temp_lock); -+ raw_spin_unlock_irq(&pkg_temp_lock); - mutex_unlock(&thermal_zone_mutex); - return; - } -@@ -310,7 +310,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) - } - - enable_pkg_thres_interrupt(); -- spin_unlock_irq(&pkg_temp_lock); -+ raw_spin_unlock_irq(&pkg_temp_lock); - - /* - * If tzone is not NULL, then thermal_zone_mutex will prevent the -@@ -335,7 +335,7 @@ static int pkg_thermal_notify(u64 msr_val) - struct pkg_device *pkgdev; - unsigned long flags; - -- spin_lock_irqsave(&pkg_temp_lock, flags); -+ raw_spin_lock_irqsave(&pkg_temp_lock, flags); - ++pkg_interrupt_cnt; - - disable_pkg_thres_interrupt(); -@@ -347,7 +347,7 @@ static int pkg_thermal_notify(u64 msr_val) - pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work); - } - -- spin_unlock_irqrestore(&pkg_temp_lock, flags); -+ raw_spin_unlock_irqrestore(&pkg_temp_lock, flags); - return 0; - } - -@@ -393,9 +393,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) - pkgdev->msr_pkg_therm_high); - - cpumask_set_cpu(cpu, &pkgdev->cpumask); -- spin_lock_irq(&pkg_temp_lock); -+ raw_spin_lock_irq(&pkg_temp_lock); - packages[pkgid] = pkgdev; -- spin_unlock_irq(&pkg_temp_lock); -+ raw_spin_unlock_irq(&pkg_temp_lock); - return 0; - } - -@@ -432,7 +432,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) - } - - /* Protect against work and interrupts */ -- spin_lock_irq(&pkg_temp_lock); -+ raw_spin_lock_irq(&pkg_temp_lock); - - /* - * Check whether this cpu was the current target and store the new -@@ -464,9 +464,9 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) - * To cancel the work we need to drop the lock, otherwise - * we might deadlock if the work needs to be flushed. 
- */ -- spin_unlock_irq(&pkg_temp_lock); -+ raw_spin_unlock_irq(&pkg_temp_lock); - cancel_delayed_work_sync(&pkgdev->work); -- spin_lock_irq(&pkg_temp_lock); -+ raw_spin_lock_irq(&pkg_temp_lock); - /* - * If this is not the last cpu in the package and the work - * did not run after we dropped the lock above, then we -@@ -477,7 +477,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) - pkg_thermal_schedule_work(target, &pkgdev->work); - } - -- spin_unlock_irq(&pkg_temp_lock); -+ raw_spin_unlock_irq(&pkg_temp_lock); - - /* Final cleanup if this is the last cpu */ - if (lastcpu) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0294-dma-buf-Use-seqlock_t-instread-disabling-preemption.patch b/kernel/patches-4.19.x-rt/0294-dma-buf-Use-seqlock_t-instread-disabling-preemption.patch deleted file mode 100644 index acc133923..000000000 --- a/kernel/patches-4.19.x-rt/0294-dma-buf-Use-seqlock_t-instread-disabling-preemption.patch +++ /dev/null @@ -1,295 +0,0 @@ -From fb3773dcd97159de33e77ec876a5ec0f3d70d576 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 14 Aug 2019 16:38:43 +0200 -Subject: [PATCH 294/328] dma-buf: Use seqlock_t instread disabling preemption - -[ Upstream commit 240610aa31094f51f299f06eb8dae8d4cd8d4500 ] - -"dma reservation" disables preemption while acquiring the write access -for "seqcount" and then may acquire a spinlock_t. - -Replace the seqcount with a seqlock_t which provides seqcount like -semantic and lock for writer. - -Link: https://lkml.kernel.org/r/f410b429-db86-f81c-7c67-f563fa808b62@free.fr -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - drivers/dma-buf/dma-buf.c | 8 ++-- - drivers/dma-buf/reservation.c | 43 +++++++------------ - .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 6 +-- - drivers/gpu/drm/i915/i915_gem.c | 10 ++--- - include/linux/reservation.h | 4 +- - 5 files changed, 29 insertions(+), 42 deletions(-) - -diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c -index 69842145c223..4c3ef46e7149 100644 ---- a/drivers/dma-buf/dma-buf.c -+++ b/drivers/dma-buf/dma-buf.c -@@ -179,7 +179,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) - return 0; - - retry: -- seq = read_seqcount_begin(&resv->seq); -+ seq = read_seqbegin(&resv->seq); - rcu_read_lock(); - - fobj = rcu_dereference(resv->fence); -@@ -188,7 +188,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) - else - shared_count = 0; - fence_excl = rcu_dereference(resv->fence_excl); -- if (read_seqcount_retry(&resv->seq, seq)) { -+ if (read_seqretry(&resv->seq, seq)) { - rcu_read_unlock(); - goto retry; - } -@@ -1046,12 +1046,12 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) - - robj = buf_obj->resv; - while (true) { -- seq = read_seqcount_begin(&robj->seq); -+ seq = read_seqbegin(&robj->seq); - rcu_read_lock(); - fobj = rcu_dereference(robj->fence); - shared_count = fobj ? 
fobj->shared_count : 0; - fence = rcu_dereference(robj->fence_excl); -- if (!read_seqcount_retry(&robj->seq, seq)) -+ if (!read_seqretry(&robj->seq, seq)) - break; - rcu_read_unlock(); - } -diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c -index 49ab09468ba1..f11d58492216 100644 ---- a/drivers/dma-buf/reservation.c -+++ b/drivers/dma-buf/reservation.c -@@ -109,8 +109,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, - - dma_fence_get(fence); - -- preempt_disable(); -- write_seqcount_begin(&obj->seq); -+ write_seqlock(&obj->seq); - - for (i = 0; i < fobj->shared_count; ++i) { - struct dma_fence *old_fence; -@@ -121,8 +120,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, - if (old_fence->context == fence->context) { - /* memory barrier is added by write_seqcount_begin */ - RCU_INIT_POINTER(fobj->shared[i], fence); -- write_seqcount_end(&obj->seq); -- preempt_enable(); -+ write_sequnlock(&obj->seq); - - dma_fence_put(old_fence); - return; -@@ -146,8 +144,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, - fobj->shared_count++; - } - -- write_seqcount_end(&obj->seq); -- preempt_enable(); -+ write_sequnlock(&obj->seq); - - dma_fence_put(signaled); - } -@@ -191,15 +188,13 @@ reservation_object_add_shared_replace(struct reservation_object *obj, - fobj->shared_count++; - - done: -- preempt_disable(); -- write_seqcount_begin(&obj->seq); -+ write_seqlock(&obj->seq); - /* - * RCU_INIT_POINTER can be used here, - * seqcount provides the necessary barriers - */ - RCU_INIT_POINTER(obj->fence, fobj); -- write_seqcount_end(&obj->seq); -- preempt_enable(); -+ write_sequnlock(&obj->seq); - - if (!old) - return; -@@ -259,14 +254,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, - if (fence) - dma_fence_get(fence); - -- preempt_disable(); -- write_seqcount_begin(&obj->seq); -- /* write_seqcount_begin provides the necessary memory barrier */ -+ write_seqlock(&obj->seq); - RCU_INIT_POINTER(obj->fence_excl, fence); - if (old) - old->shared_count = 0; -- write_seqcount_end(&obj->seq); -- preempt_enable(); -+ write_sequnlock(&obj->seq); - - /* inplace update, no shared fences */ - while (i--) -@@ -349,13 +341,10 @@ int reservation_object_copy_fences(struct reservation_object *dst, - src_list = reservation_object_get_list(dst); - old = reservation_object_get_excl(dst); - -- preempt_disable(); -- write_seqcount_begin(&dst->seq); -- /* write_seqcount_begin provides the necessary memory barrier */ -+ write_seqlock(&dst->seq); - RCU_INIT_POINTER(dst->fence_excl, new); - RCU_INIT_POINTER(dst->fence, dst_list); -- write_seqcount_end(&dst->seq); -- preempt_enable(); -+ write_sequnlock(&dst->seq); - - if (src_list) - kfree_rcu(src_list, rcu); -@@ -396,7 +385,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, - shared_count = i = 0; - - rcu_read_lock(); -- seq = read_seqcount_begin(&obj->seq); -+ seq = read_seqbegin(&obj->seq); - - fence_excl = rcu_dereference(obj->fence_excl); - if (fence_excl && !dma_fence_get_rcu(fence_excl)) -@@ -445,7 +434,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, - } - } - -- if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { -+ if (i != shared_count || read_seqretry(&obj->seq, seq)) { - while (i--) - dma_fence_put(shared[i]); - dma_fence_put(fence_excl); -@@ -494,7 +483,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, - - retry: - shared_count = 0; -- seq = 
read_seqcount_begin(&obj->seq); -+ seq = read_seqbegin(&obj->seq); - rcu_read_lock(); - i = -1; - -@@ -541,7 +530,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, - - rcu_read_unlock(); - if (fence) { -- if (read_seqcount_retry(&obj->seq, seq)) { -+ if (read_seqretry(&obj->seq, seq)) { - dma_fence_put(fence); - goto retry; - } -@@ -597,7 +586,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, - retry: - ret = true; - shared_count = 0; -- seq = read_seqcount_begin(&obj->seq); -+ seq = read_seqbegin(&obj->seq); - - if (test_all) { - unsigned i; -@@ -618,7 +607,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, - break; - } - -- if (read_seqcount_retry(&obj->seq, seq)) -+ if (read_seqretry(&obj->seq, seq)) - goto retry; - } - -@@ -631,7 +620,7 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, - if (ret < 0) - goto retry; - -- if (read_seqcount_retry(&obj->seq, seq)) -+ if (read_seqretry(&obj->seq, seq)) - goto retry; - } - } -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c -index f92597c292fe..10c675850aac 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c -@@ -261,11 +261,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, - } - - /* Install the new fence list, seqcount provides the barriers */ -- preempt_disable(); -- write_seqcount_begin(&resv->seq); -+ write_seqlock(&resv->seq); - RCU_INIT_POINTER(resv->fence, new); -- write_seqcount_end(&resv->seq); -- preempt_enable(); -+ write_sequnlock(&resv->seq); - - /* Drop the references to the removed fences or move them to ef_list */ - for (i = j, k = 0; i < old->shared_count; ++i) { -diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c -index c7d05ac7af3c..d484e79316bf 100644 ---- a/drivers/gpu/drm/i915/i915_gem.c -+++ b/drivers/gpu/drm/i915/i915_gem.c -@@ -516,7 +516,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, - long timeout, - struct intel_rps_client *rps_client) - { -- unsigned int seq = __read_seqcount_begin(&resv->seq); -+ unsigned int seq = read_seqbegin(&resv->seq); - struct dma_fence *excl; - bool prune_fences = false; - -@@ -569,9 +569,9 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, - * signaled and that the reservation object has not been changed (i.e. - * no new fences have been added). 
- */ -- if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) { -+ if (prune_fences && !read_seqretry(&resv->seq, seq)) { - if (reservation_object_trylock(resv)) { -- if (!__read_seqcount_retry(&resv->seq, seq)) -+ if (!read_seqretry(&resv->seq, seq)) - reservation_object_add_excl_fence(resv, NULL); - reservation_object_unlock(resv); - } -@@ -4615,7 +4615,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, - * - */ - retry: -- seq = raw_read_seqcount(&obj->resv->seq); -+ seq = read_seqbegin(&obj->resv->seq); - - /* Translate the exclusive fence to the READ *and* WRITE engine */ - args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); -@@ -4633,7 +4633,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, - } - } - -- if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) -+ if (args->busy && read_seqretry(&obj->resv->seq, seq)) - goto retry; - - err = 0; -diff --git a/include/linux/reservation.h b/include/linux/reservation.h -index 02166e815afb..0b31df1af698 100644 ---- a/include/linux/reservation.h -+++ b/include/linux/reservation.h -@@ -72,7 +72,7 @@ struct reservation_object_list { - */ - struct reservation_object { - struct ww_mutex lock; -- seqcount_t seq; -+ seqlock_t seq; - - struct dma_fence __rcu *fence_excl; - struct reservation_object_list __rcu *fence; -@@ -92,7 +92,7 @@ reservation_object_init(struct reservation_object *obj) - { - ww_mutex_init(&obj->lock, &reservation_ww_class); - -- __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); -+ seqlock_init(&obj->seq); - RCU_INIT_POINTER(obj->fence, NULL); - RCU_INIT_POINTER(obj->fence_excl, NULL); - obj->staged = NULL; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0295-KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch b/kernel/patches-4.19.x-rt/0295-KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch deleted file mode 100644 index 3beeb3965..000000000 --- a/kernel/patches-4.19.x-rt/0295-KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 616f33b56d89956d43be1ee3a48873012a5e01f2 Mon Sep 17 00:00:00 2001 -From: Thomas Gleixner -Date: Tue, 13 Aug 2019 14:29:41 +0200 -Subject: [PATCH 295/328] KVM: arm/arm64: Let the timer expire in hardirq - context on RT - -[ Upstream commit 719cc080c914045a6e35787bf4dc3ba91cfd3efb ] - -The timers are canceled from an preempt-notifier which is invoked with -disabled preemption which is not allowed on PREEMPT_RT. -The timer callback is short so in could be invoked in hard-IRQ context -on -RT. - -Let the timer expire on hard-IRQ context even on -RT. 
- -Signed-off-by: Thomas Gleixner -Acked-by: Marc Zyngier -Tested-by: Julien Grall -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - virt/kvm/arm/arch_timer.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c -index 17cecc96f735..217d39f40393 100644 ---- a/virt/kvm/arm/arch_timer.c -+++ b/virt/kvm/arm/arch_timer.c -@@ -67,7 +67,7 @@ static inline bool userspace_irqchip(struct kvm *kvm) - static void soft_timer_start(struct hrtimer *hrt, u64 ns) - { - hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns), -- HRTIMER_MODE_ABS); -+ HRTIMER_MODE_ABS_HARD); - } - - static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work) -@@ -638,10 +638,10 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) - vcpu_ptimer(vcpu)->cntvoff = 0; - - INIT_WORK(&timer->expired, kvm_timer_inject_irq_work); -- hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); -+ hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); - timer->bg_timer.function = kvm_bg_timer_expire; - -- hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); -+ hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); - timer->phys_timer.function = kvm_phys_timer_expire; - - vtimer->irq.irq = default_vtimer_irq.irq; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0296-x86-preempt-Check-preemption-level-before-looking-at.patch b/kernel/patches-4.19.x-rt/0296-x86-preempt-Check-preemption-level-before-looking-at.patch deleted file mode 100644 index fa4c93c82..000000000 --- a/kernel/patches-4.19.x-rt/0296-x86-preempt-Check-preemption-level-before-looking-at.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 3ff694fd2d9a9eeb03196cbfbb4dd353a83d680e Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 14 Aug 2019 17:08:58 +0200 -Subject: [PATCH 296/328] x86: preempt: Check preemption level before looking - at lazy-preempt - -[ Upstream commit 19fc8557f2323c52b26561651ed4d51fc688a740 ] - -Before evaluating the lazy-preempt state it must be ensure that the -preempt-count is zero. 
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - arch/x86/include/asm/preempt.h | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h -index f66708779274..afa0e42ccdd1 100644 ---- a/arch/x86/include/asm/preempt.h -+++ b/arch/x86/include/asm/preempt.h -@@ -96,6 +96,8 @@ static __always_inline bool __preempt_count_dec_and_test(void) - if (____preempt_count_dec_and_test()) - return true; - #ifdef CONFIG_PREEMPT_LAZY -+ if (preempt_count()) -+ return false; - if (current_thread_info()->preempt_lazy_count) - return false; - return test_thread_flag(TIF_NEED_RESCHED_LAZY); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0297-hrtimer-Use-READ_ONCE-to-access-timer-base-in-hrimer.patch b/kernel/patches-4.19.x-rt/0297-hrtimer-Use-READ_ONCE-to-access-timer-base-in-hrimer.patch deleted file mode 100644 index 7ad84a97f..000000000 --- a/kernel/patches-4.19.x-rt/0297-hrtimer-Use-READ_ONCE-to-access-timer-base-in-hrimer.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 20fa662be972ae5c608f2e5c919aedc9bbdc8930 Mon Sep 17 00:00:00 2001 -From: Julien Grall -Date: Wed, 21 Aug 2019 10:24:07 +0100 -Subject: [PATCH 297/328] hrtimer: Use READ_ONCE to access timer->base in - hrimer_grab_expiry_lock() - -[ Upstream commit 2c8fdbe7ef0ad06c1a326886c5954e117b5657d6 ] - -The update to timer->base is protected by the base->cpu_base->lock(). -However, hrtimer_grab_expirty_lock() does not access it with the lock. - -So it would theorically be possible to have timer->base changed under -our feet. We need to prevent the compiler to refetch timer->base so the -check and the access is performed on the same base. - -Other access of timer->base are either done with a lock or protected -with READ_ONCE(). So use READ_ONCE() in hrtimer_grab_expirty_lock(). - -Signed-off-by: Julien Grall -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/time/hrtimer.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 94d97eae0a46..d6026c170c2d 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -941,7 +941,7 @@ EXPORT_SYMBOL_GPL(hrtimer_forward); - - void hrtimer_grab_expiry_lock(const struct hrtimer *timer) - { -- struct hrtimer_clock_base *base = timer->base; -+ struct hrtimer_clock_base *base = READ_ONCE(timer->base); - - if (base && base->cpu_base) { - spin_lock(&base->cpu_base->softirq_expiry_lock); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0298-hrtimer-Don-t-grab-the-expiry-lock-for-non-soft-hrti.patch b/kernel/patches-4.19.x-rt/0298-hrtimer-Don-t-grab-the-expiry-lock-for-non-soft-hrti.patch deleted file mode 100644 index ad396b51d..000000000 --- a/kernel/patches-4.19.x-rt/0298-hrtimer-Don-t-grab-the-expiry-lock-for-non-soft-hrti.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 3097c6c64c15f7942c9b280325c27db70de41d04 Mon Sep 17 00:00:00 2001 -From: Julien Grall -Date: Wed, 21 Aug 2019 10:24:08 +0100 -Subject: [PATCH 298/328] hrtimer: Don't grab the expiry lock for non-soft - hrtimer - -[ Upstream commit fd420354bea2f57c11f3de191dffdeea76531e76 ] - -Acquiring the lock in hrtimer_grab_expiry_lock() is designed for -sleeping-locks and should not be used with disabled interrupts. -hrtimer_cancel() may invoke hrtimer_grab_expiry_lock() also for locks -which expire in hard-IRQ context. - -Let hrtimer_cancel() invoke hrtimer_grab_expiry_lock() only for locks -which expire in softirq context. 
- -Signed-off-by: Julien Grall -Signed-off-by: Steven Rostedt (VMware) -[bigeasy: rewrite changelog] -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/time/hrtimer.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index d6026c170c2d..49d20fe8570f 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -943,7 +943,7 @@ void hrtimer_grab_expiry_lock(const struct hrtimer *timer) - { - struct hrtimer_clock_base *base = READ_ONCE(timer->base); - -- if (base && base->cpu_base) { -+ if (timer->is_soft && base && base->cpu_base) { - spin_lock(&base->cpu_base->softirq_expiry_lock); - spin_unlock(&base->cpu_base->softirq_expiry_lock); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0299-hrtimer-Prevent-using-hrtimer_grab_expiry_lock-on-mi.patch b/kernel/patches-4.19.x-rt/0299-hrtimer-Prevent-using-hrtimer_grab_expiry_lock-on-mi.patch deleted file mode 100644 index 10eb74f7b..000000000 --- a/kernel/patches-4.19.x-rt/0299-hrtimer-Prevent-using-hrtimer_grab_expiry_lock-on-mi.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 0167d3fb02b4f30f0f219f65726e5344ad871f9f Mon Sep 17 00:00:00 2001 -From: Julien Grall -Date: Wed, 21 Aug 2019 10:24:09 +0100 -Subject: [PATCH 299/328] hrtimer: Prevent using hrtimer_grab_expiry_lock() on - migration_base - -[ Upstream commit cef1b87f98823af923a386f3f69149acb212d4a1 ] - -As tglx puts it: -|If base == migration_base then there is no point to lock soft_expiry_lock -|simply because the timer is not executing the callback in soft irq context -|and the whole lock/unlock dance can be avoided. - -Furthermore, all the path leading to hrtimer_grab_expiry_lock() assumes -timer->base and timer->base->cpu_base are always non-NULL. So it is safe -to remove the NULL checks here. 
- -Signed-off-by: Julien Grall -Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1908211557420.2223@nanos.tec.linutronix.de -Signed-off-by: Steven Rostedt (VMware) -[bigeasy: rewrite changelog] -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/time/hrtimer.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 49d20fe8570f..1a5167c68310 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -943,7 +943,7 @@ void hrtimer_grab_expiry_lock(const struct hrtimer *timer) - { - struct hrtimer_clock_base *base = READ_ONCE(timer->base); - -- if (timer->is_soft && base && base->cpu_base) { -+ if (timer->is_soft && base != &migration_base) { - spin_lock(&base->cpu_base->softirq_expiry_lock); - spin_unlock(&base->cpu_base->softirq_expiry_lock); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0300-hrtimer-Add-a-missing-bracket-and-hide-migration_bas.patch b/kernel/patches-4.19.x-rt/0300-hrtimer-Add-a-missing-bracket-and-hide-migration_bas.patch deleted file mode 100644 index 1ff434942..000000000 --- a/kernel/patches-4.19.x-rt/0300-hrtimer-Add-a-missing-bracket-and-hide-migration_bas.patch +++ /dev/null @@ -1,74 +0,0 @@ -From 6aecd0ba09e1dbf7afd6d646c65f732654892d29 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 4 Sep 2019 16:55:27 +0200 -Subject: [PATCH 300/328] hrtimer: Add a missing bracket and hide - `migration_base' on !SMP - -[ Upstream commit 47b6de0b7f22c28b40275aeede3993d807668c3b ] - -[ Upstream commit 5d2295f3a93b04986d069ebeaf5b07725f9096c1 ] - -The recent change to avoid taking the expiry lock when a timer is currently -migrated missed to add a bracket at the end of the if statement leading to -compile errors. Since that commit the variable `migration_base' is always -used but it is only available on SMP configuration thus leading to another -compile error. The changelog says "The timer base and base->cpu_base -cannot be NULL in the code path", so it is safe to limit this check to SMP -configurations only. - -Add the missing bracket to the if statement and hide `migration_base' -behind CONFIG_SMP bars. - -[ tglx: Mark the functions inline ... 
] - -Fixes: 68b2c8c1e4210 ("hrtimer: Don't take expiry_lock when timer is currently migrated") -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Thomas Gleixner -Link: https://lkml.kernel.org/r/20190904145527.eah7z56ntwobqm6j@linutronix.de -Signed-off-by: Steven Rostedt (VMware) -[bigeasy: port back to RT] -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/time/hrtimer.c | 12 +++++++++++- - 1 file changed, 11 insertions(+), 1 deletion(-) - -diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 1a5167c68310..e54a95de8b79 100644 ---- a/kernel/time/hrtimer.c -+++ b/kernel/time/hrtimer.c -@@ -150,6 +150,11 @@ static struct hrtimer_cpu_base migration_cpu_base = { - - #define migration_base migration_cpu_base.clock_base[0] - -+static inline bool is_migration_base(struct hrtimer_clock_base *base) -+{ -+ return base == &migration_base; -+} -+ - /* - * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock - * means that all timers which are tied to this base via timer->base are -@@ -274,6 +279,11 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, - - #else /* CONFIG_SMP */ - -+static inline bool is_migration_base(struct hrtimer_clock_base *base) -+{ -+ return false; -+} -+ - static inline struct hrtimer_clock_base * - lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) - { -@@ -943,7 +953,7 @@ void hrtimer_grab_expiry_lock(const struct hrtimer *timer) - { - struct hrtimer_clock_base *base = READ_ONCE(timer->base); - -- if (timer->is_soft && base != &migration_base) { -+ if (timer->is_soft && is_migration_base(base)) { - spin_lock(&base->cpu_base->softirq_expiry_lock); - spin_unlock(&base->cpu_base->softirq_expiry_lock); - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0301-posix-timers-Unlock-expiry-lock-in-the-early-return.patch b/kernel/patches-4.19.x-rt/0301-posix-timers-Unlock-expiry-lock-in-the-early-return.patch deleted file mode 100644 index ba41b6956..000000000 --- a/kernel/patches-4.19.x-rt/0301-posix-timers-Unlock-expiry-lock-in-the-early-return.patch +++ /dev/null @@ -1,40 +0,0 @@ -From ae0252a9cd108b1f72aa0d7ff0dfc9a888a05d81 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 16 Sep 2019 12:33:59 +0200 -Subject: [PATCH 301/328] posix-timers: Unlock expiry lock in the early return - -[ Upstream commit 356a2781375ec58521a9bc3f500488745990c242 ] - -Patch ("posix-timers: Add expiry lock") acquired a lock in -run_posix_cpu_timers() but didn't drop the lock in the early return. - -Unlock the lock in the early return path. 
- -Reported-by: kbuild test robot -Reported-by: Dan Carpenter -Reviewed-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/time/posix-cpu-timers.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c -index 765e700962ab..c9964dc3276b 100644 ---- a/kernel/time/posix-cpu-timers.c -+++ b/kernel/time/posix-cpu-timers.c -@@ -1175,8 +1175,10 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) - expiry_lock = this_cpu_ptr(&cpu_timer_expiry_lock); - spin_lock(expiry_lock); - -- if (!lock_task_sighand(tsk, &flags)) -+ if (!lock_task_sighand(tsk, &flags)) { -+ spin_unlock(expiry_lock); - return; -+ } - /* - * Here we take off tsk->signal->cpu_timers[N] and - * tsk->cpu_timers[N] all the timers that are firing, and --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0302-sched-migrate_dis-enable-Use-sleeping_lock-to-annota.patch b/kernel/patches-4.19.x-rt/0302-sched-migrate_dis-enable-Use-sleeping_lock-to-annota.patch deleted file mode 100644 index 6ec8f390a..000000000 --- a/kernel/patches-4.19.x-rt/0302-sched-migrate_dis-enable-Use-sleeping_lock-to-annota.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 1e2e07b7ee887a2c15a4b686358602b5316d3002 Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Tue, 24 Sep 2019 14:36:41 +0200 -Subject: [PATCH 302/328] =?UTF-8?q?sched:=20migrate=5Fdis/enable:=20Use=20?= - =?UTF-8?q?sleeping=5Flock=E2=80=A6()=20to=20annotate=20sleeping=20points?= -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -[ Upstream commit 4230dd3824c3e1785504e6f757ce79a4b55651fa ] - -Without this, rcu_note_context_switch() will complain if an RCU read lock -is held when migrate_enable() calls stop_one_cpu(). Likewise when -migrate_disable() calls pin_current_cpu() which calls __read_rt_lock() -- -which bypasses the part of the mutex code that calls sleeping_lock_inc(). 
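The annotation can be pictured as a per-task "expected sleep" counter: blocking done on behalf of an RT sleeping lock bumps the counter, so the debug check that fires on a context switch can tell such sleeps apart from real bugs. A rough user-space model of that idea (every name below is a made-up stand-in, not the kernel interface):

    #include <stdio.h>

    static _Thread_local int rcu_read_depth;   /* models rcu_read_lock() nesting */
    static _Thread_local int sleeping_lock;    /* models the sleeping_lock_*() count */

    static void sleeping_lock_inc(void) { sleeping_lock++; }
    static void sleeping_lock_dec(void) { sleeping_lock--; }

    /* Models the debug check that complains when a task blocks inside a
     * read-side critical section without the RT annotation. */
    static void note_context_switch(void)
    {
            if (rcu_read_depth && !sleeping_lock)
                    fprintf(stderr, "BUG: blocking inside RCU read-side section\n");
    }

    static void stop_one_cpu(void)             /* stand-in for the blocking call */
    {
            note_context_switch();
    }

    static void migrate_enable_slowpath(void)
    {
            /* The fix: bracket the blocking call so the check stays quiet. */
            sleeping_lock_inc();
            stop_one_cpu();
            sleeping_lock_dec();
    }

    int main(void)
    {
            rcu_read_depth = 1;            /* pretend an RCU read lock is held */
            migrate_enable_slowpath();     /* annotated: no warning */
            stop_one_cpu();                /* unannotated block: warning fires */
            return 0;
    }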
- -Signed-off-by: Scott Wood -Signed-off-by: Steven Rostedt (VMware) -[bigeasy: use sleeping_lock_…() ] -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/cpu.c | 2 ++ - kernel/sched/core.c | 3 +++ - 2 files changed, 5 insertions(+) - -diff --git a/kernel/cpu.c b/kernel/cpu.c -index 1541189f417b..0d50ae262898 100644 ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -307,7 +307,9 @@ void pin_current_cpu(void) - preempt_lazy_enable(); - preempt_enable(); - -+ sleeping_lock_inc(); - __read_rt_lock(cpuhp_pin); -+ sleeping_lock_dec(); - - preempt_disable(); - preempt_lazy_disable(); -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index a1c0123e7636..3413b9ebef1f 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7372,7 +7372,10 @@ void migrate_enable(void) - unpin_current_cpu(); - preempt_lazy_enable(); - preempt_enable(); -+ -+ sleeping_lock_inc(); - stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); -+ sleeping_lock_dec(); - tlb_migrate_finish(p->mm); - - return; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0303-sched-__set_cpus_allowed_ptr-Check-cpus_mask-not-cpu.patch b/kernel/patches-4.19.x-rt/0303-sched-__set_cpus_allowed_ptr-Check-cpus_mask-not-cpu.patch deleted file mode 100644 index d7e9fa83c..000000000 --- a/kernel/patches-4.19.x-rt/0303-sched-__set_cpus_allowed_ptr-Check-cpus_mask-not-cpu.patch +++ /dev/null @@ -1,38 +0,0 @@ -From ee49bec178fb001b6bc6d6db36cc4832f1c635c3 Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Sat, 27 Jul 2019 00:56:32 -0500 -Subject: [PATCH 303/328] sched: __set_cpus_allowed_ptr: Check cpus_mask, not - cpus_ptr - -[ Upstream commit e5606fb7b042db634ed62b4dd733d62e050e468f ] - -This function is concerned with the long-term cpu mask, not the -transitory mask the task might have while migrate disabled. Before -this patch, if a task was migrate disabled at the time -__set_cpus_allowed_ptr() was called, and the new mask happened to be -equal to the cpu that the task was running on, then the mask update -would be lost. - -Signed-off-by: Scott Wood -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/sched/core.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 3413b9ebef1f..d6bd8129a390 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1157,7 +1157,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, - goto out; - } - -- if (cpumask_equal(p->cpus_ptr, new_mask)) -+ if (cpumask_equal(&p->cpus_mask, new_mask)) - goto out; - - dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0304-sched-Remove-dead-__migrate_disabled-check.patch b/kernel/patches-4.19.x-rt/0304-sched-Remove-dead-__migrate_disabled-check.patch deleted file mode 100644 index 72c8fe411..000000000 --- a/kernel/patches-4.19.x-rt/0304-sched-Remove-dead-__migrate_disabled-check.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 0f49b94879548ccd41d4518afa7cf6b88b578656 Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Sat, 27 Jul 2019 00:56:33 -0500 -Subject: [PATCH 304/328] sched: Remove dead __migrate_disabled() check - -[ Upstream commit 14d9272d534ea91262e15db99443fc5995c7c016 ] - -This code was unreachable given the __migrate_disabled() branch -to "out" immediately beforehand. 
- -Signed-off-by: Scott Wood -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/sched/core.c | 7 ------- - 1 file changed, 7 deletions(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index d6bd8129a390..a29f33e776d0 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1182,13 +1182,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, - if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) - goto out; - --#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) -- if (__migrate_disabled(p)) { -- p->migrate_disable_update = 1; -- goto out; -- } --#endif -- - if (task_running(rq, p) || p->state == TASK_WAKING) { - struct migration_arg arg = { p, dest_cpu }; - /* Need help from migration thread: drop lock and wait. */ --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0305-sched-migrate-disable-Protect-cpus_ptr-with-lock.patch b/kernel/patches-4.19.x-rt/0305-sched-migrate-disable-Protect-cpus_ptr-with-lock.patch deleted file mode 100644 index bd6e22db7..000000000 --- a/kernel/patches-4.19.x-rt/0305-sched-migrate-disable-Protect-cpus_ptr-with-lock.patch +++ /dev/null @@ -1,46 +0,0 @@ -From db5f1e12f85243ff54296bf28fa3cd8d0b01dac6 Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Sat, 27 Jul 2019 00:56:34 -0500 -Subject: [PATCH 305/328] sched: migrate disable: Protect cpus_ptr with lock - -[ Upstream commit 27ee52a891ed2c7e2e2c8332ccae0de7c2674b09 ] - -Various places assume that cpus_ptr is protected by rq/pi locks, -so don't change it before grabbing those locks. - -Signed-off-by: Scott Wood -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/sched/core.c | 6 ++---- - 1 file changed, 2 insertions(+), 4 deletions(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index a29f33e776d0..d9a3f88508ee 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7250,9 +7250,8 @@ migrate_disable_update_cpus_allowed(struct task_struct *p) - struct rq *rq; - struct rq_flags rf; - -- p->cpus_ptr = cpumask_of(smp_processor_id()); -- - rq = task_rq_lock(p, &rf); -+ p->cpus_ptr = cpumask_of(smp_processor_id()); - update_nr_migratory(p, -1); - p->nr_cpus_allowed = 1; - task_rq_unlock(rq, p, &rf); -@@ -7264,9 +7263,8 @@ migrate_enable_update_cpus_allowed(struct task_struct *p) - struct rq *rq; - struct rq_flags rf; - -- p->cpus_ptr = &p->cpus_mask; -- - rq = task_rq_lock(p, &rf); -+ p->cpus_ptr = &p->cpus_mask; - p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask); - update_nr_migratory(p, 1); - task_rq_unlock(rq, p, &rf); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0306-lib-smp_processor_id-Don-t-use-cpumask_equal.patch b/kernel/patches-4.19.x-rt/0306-lib-smp_processor_id-Don-t-use-cpumask_equal.patch deleted file mode 100644 index 82c63b230..000000000 --- a/kernel/patches-4.19.x-rt/0306-lib-smp_processor_id-Don-t-use-cpumask_equal.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 053a5be342cf9ab6925975f8ce90e62bf13c81b1 Mon Sep 17 00:00:00 2001 -From: Waiman Long -Date: Thu, 3 Oct 2019 16:36:08 -0400 -Subject: [PATCH 306/328] lib/smp_processor_id: Don't use cpumask_equal() - -[ Upstream commit 659252061477862f45b79e1de169e6030f5c8918 ] - -The check_preemption_disabled() function uses cpumask_equal() to see -if the task is bounded to the current CPU only. cpumask_equal() calls -memcmp() to do the comparison. As x86 doesn't have __HAVE_ARCH_MEMCMP, -the slow memcmp() function in lib/string.c is used. 
- -On a RT kernel that call check_preemption_disabled() very frequently, -below is the perf-record output of a certain microbenchmark: - - 42.75% 2.45% testpmd [kernel.kallsyms] [k] check_preemption_disabled - 40.01% 39.97% testpmd [kernel.kallsyms] [k] memcmp - -We should avoid calling memcmp() in performance critical path. So the -cpumask_equal() call is now replaced with an equivalent simpler check. - -Signed-off-by: Waiman Long -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - lib/smp_processor_id.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c -index fb35c45b9421..b8a8a8db2d75 100644 ---- a/lib/smp_processor_id.c -+++ b/lib/smp_processor_id.c -@@ -22,7 +22,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1, - * Kernel threads bound to a single CPU can safely use - * smp_processor_id(): - */ -- if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu))) -+ if (current->nr_cpus_allowed == 1) - goto out; - - /* --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0307-futex-Make-the-futex_hash_bucket-spinlock_t-again-an.patch b/kernel/patches-4.19.x-rt/0307-futex-Make-the-futex_hash_bucket-spinlock_t-again-an.patch deleted file mode 100644 index a3fdb041d..000000000 --- a/kernel/patches-4.19.x-rt/0307-futex-Make-the-futex_hash_bucket-spinlock_t-again-an.patch +++ /dev/null @@ -1,737 +0,0 @@ -From 4e0a2787e8d6edaa2dcec8a204194283253d8247 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Mon, 7 Oct 2019 16:45:18 +0200 -Subject: [PATCH 307/328] futex: Make the futex_hash_bucket spinlock_t again - and bring back its old state - -[ Upstream commit 954ad80c23edfe71f4e8ce70b961eac884320c3a ] - -This is an all-in-one patch that reverts the patches: - futex: Make the futex_hash_bucket lock raw - futex: Delay deallocation of pi_state - -and adds back the old patches we had: - futex: workaround migrate_disable/enable in different context - rtmutex: Handle the various new futex race conditions - futex: Fix bug on when a requeued RT task times out - futex: Ensure lock/unlock symetry versus pi_lock and hash bucket lock - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/futex.c | 230 ++++++++++++++++++-------------- - kernel/locking/rtmutex.c | 65 ++++++++- - kernel/locking/rtmutex_common.h | 3 + - 3 files changed, 194 insertions(+), 104 deletions(-) - -diff --git a/kernel/futex.c b/kernel/futex.c -index 0b8cff8d9162..e815cf542b82 100644 ---- a/kernel/futex.c -+++ b/kernel/futex.c -@@ -243,7 +243,7 @@ struct futex_q { - struct plist_node list; - - struct task_struct *task; -- raw_spinlock_t *lock_ptr; -+ spinlock_t *lock_ptr; - union futex_key key; - struct futex_pi_state *pi_state; - struct rt_mutex_waiter *rt_waiter; -@@ -264,7 +264,7 @@ static const struct futex_q futex_q_init = { - */ - struct futex_hash_bucket { - atomic_t waiters; -- raw_spinlock_t lock; -+ spinlock_t lock; - struct plist_head chain; - } ____cacheline_aligned_in_smp; - -@@ -825,13 +825,13 @@ static void get_pi_state(struct futex_pi_state *pi_state) - * Drops a reference to the pi_state object and frees or caches it - * when the last reference is gone. 
- */ --static struct futex_pi_state *__put_pi_state(struct futex_pi_state *pi_state) -+static void put_pi_state(struct futex_pi_state *pi_state) - { - if (!pi_state) -- return NULL; -+ return; - - if (!atomic_dec_and_test(&pi_state->refcount)) -- return NULL; -+ return; - - /* - * If pi_state->owner is NULL, the owner is most probably dying -@@ -851,7 +851,9 @@ static struct futex_pi_state *__put_pi_state(struct futex_pi_state *pi_state) - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - } - -- if (!current->pi_state_cache) { -+ if (current->pi_state_cache) { -+ kfree(pi_state); -+ } else { - /* - * pi_state->list is already empty. - * clear pi_state->owner. -@@ -860,30 +862,6 @@ static struct futex_pi_state *__put_pi_state(struct futex_pi_state *pi_state) - pi_state->owner = NULL; - atomic_set(&pi_state->refcount, 1); - current->pi_state_cache = pi_state; -- pi_state = NULL; -- } -- return pi_state; --} -- --static void put_pi_state(struct futex_pi_state *pi_state) --{ -- kfree(__put_pi_state(pi_state)); --} -- --static void put_pi_state_atomic(struct futex_pi_state *pi_state, -- struct list_head *to_free) --{ -- if (__put_pi_state(pi_state)) -- list_add(&pi_state->list, to_free); --} -- --static void free_pi_state_list(struct list_head *to_free) --{ -- struct futex_pi_state *p, *next; -- -- list_for_each_entry_safe(p, next, to_free, list) { -- list_del(&p->list); -- kfree(p); - } - } - -@@ -900,7 +878,6 @@ void exit_pi_state_list(struct task_struct *curr) - struct futex_pi_state *pi_state; - struct futex_hash_bucket *hb; - union futex_key key = FUTEX_KEY_INIT; -- LIST_HEAD(to_free); - - if (!futex_cmpxchg_enabled) - return; -@@ -934,7 +911,7 @@ void exit_pi_state_list(struct task_struct *curr) - } - raw_spin_unlock_irq(&curr->pi_lock); - -- raw_spin_lock(&hb->lock); -+ spin_lock(&hb->lock); - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - raw_spin_lock(&curr->pi_lock); - /* -@@ -944,8 +921,10 @@ void exit_pi_state_list(struct task_struct *curr) - if (head->next != next) { - /* retain curr->pi_lock for the loop invariant */ - raw_spin_unlock(&pi_state->pi_mutex.wait_lock); -- raw_spin_unlock(&hb->lock); -- put_pi_state_atomic(pi_state, &to_free); -+ raw_spin_unlock_irq(&curr->pi_lock); -+ spin_unlock(&hb->lock); -+ raw_spin_lock_irq(&curr->pi_lock); -+ put_pi_state(pi_state); - continue; - } - -@@ -956,7 +935,7 @@ void exit_pi_state_list(struct task_struct *curr) - - raw_spin_unlock(&curr->pi_lock); - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); -- raw_spin_unlock(&hb->lock); -+ spin_unlock(&hb->lock); - - rt_mutex_futex_unlock(&pi_state->pi_mutex); - put_pi_state(pi_state); -@@ -964,8 +943,6 @@ void exit_pi_state_list(struct task_struct *curr) - raw_spin_lock_irq(&curr->pi_lock); - } - raw_spin_unlock_irq(&curr->pi_lock); -- -- free_pi_state_list(&to_free); - } - - #endif -@@ -1452,7 +1429,7 @@ static void __unqueue_futex(struct futex_q *q) - { - struct futex_hash_bucket *hb; - -- if (WARN_ON_SMP(!q->lock_ptr || !raw_spin_is_locked(q->lock_ptr)) -+ if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) - || WARN_ON(plist_node_empty(&q->list))) - return; - -@@ -1580,21 +1557,21 @@ static inline void - double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) - { - if (hb1 <= hb2) { -- raw_spin_lock(&hb1->lock); -+ spin_lock(&hb1->lock); - if (hb1 < hb2) -- raw_spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); -+ spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING); - } else { /* hb1 > hb2 */ -- raw_spin_lock(&hb2->lock); -- 
raw_spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); -+ spin_lock(&hb2->lock); -+ spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING); - } - } - - static inline void - double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) - { -- raw_spin_unlock(&hb1->lock); -+ spin_unlock(&hb1->lock); - if (hb1 != hb2) -- raw_spin_unlock(&hb2->lock); -+ spin_unlock(&hb2->lock); - } - - /* -@@ -1622,7 +1599,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) - if (!hb_waiters_pending(hb)) - goto out_put_key; - -- raw_spin_lock(&hb->lock); -+ spin_lock(&hb->lock); - - plist_for_each_entry_safe(this, next, &hb->chain, list) { - if (match_futex (&this->key, &key)) { -@@ -1641,7 +1618,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) - } - } - -- raw_spin_unlock(&hb->lock); -+ spin_unlock(&hb->lock); - wake_up_q(&wake_q); - out_put_key: - put_futex_key(&key); -@@ -1948,7 +1925,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - struct futex_hash_bucket *hb1, *hb2; - struct futex_q *this, *next; - DEFINE_WAKE_Q(wake_q); -- LIST_HEAD(to_free); - - if (nr_wake < 0 || nr_requeue < 0) - return -EINVAL; -@@ -2176,6 +2152,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - requeue_pi_wake_futex(this, &key2, hb2); - drop_count++; - continue; -+ } else if (ret == -EAGAIN) { -+ /* -+ * Waiter was woken by timeout or -+ * signal and has set pi_blocked_on to -+ * PI_WAKEUP_INPROGRESS before we -+ * tried to enqueue it on the rtmutex. -+ */ -+ this->pi_state = NULL; -+ put_pi_state(pi_state); -+ continue; - } else if (ret) { - /* - * rt_mutex_start_proxy_lock() detected a -@@ -2186,7 +2172,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - * object. - */ - this->pi_state = NULL; -- put_pi_state_atomic(pi_state, &to_free); -+ put_pi_state(pi_state); - /* - * We stop queueing more waiters and let user - * space deal with the mess. -@@ -2203,7 +2189,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We - * need to drop it here again. - */ -- put_pi_state_atomic(pi_state, &to_free); -+ put_pi_state(pi_state); - - out_unlock: - double_unlock_hb(hb1, hb2); -@@ -2224,7 +2210,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, - out_put_key1: - put_futex_key(&key1); - out: -- free_pi_state_list(&to_free); - return ret ? ret : task_count; - } - -@@ -2248,7 +2233,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) - - q->lock_ptr = &hb->lock; - -- raw_spin_lock(&hb->lock); /* implies smp_mb(); (A) */ -+ spin_lock(&hb->lock); /* implies smp_mb(); (A) */ - return hb; - } - -@@ -2256,7 +2241,7 @@ static inline void - queue_unlock(struct futex_hash_bucket *hb) - __releases(&hb->lock) - { -- raw_spin_unlock(&hb->lock); -+ spin_unlock(&hb->lock); - hb_waiters_dec(hb); - } - -@@ -2295,7 +2280,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) - __releases(&hb->lock) - { - __queue_me(q, hb); -- raw_spin_unlock(&hb->lock); -+ spin_unlock(&hb->lock); - } - - /** -@@ -2311,41 +2296,41 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) - */ - static int unqueue_me(struct futex_q *q) - { -- raw_spinlock_t *lock_ptr; -+ spinlock_t *lock_ptr; - int ret = 0; - - /* In the common case we don't take the spinlock, which is nice. */ - retry: - /* -- * q->lock_ptr can change between this read and the following -- * raw_spin_lock. 
Use READ_ONCE to forbid the compiler from reloading -- * q->lock_ptr and optimizing lock_ptr out of the logic below. -+ * q->lock_ptr can change between this read and the following spin_lock. -+ * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and -+ * optimizing lock_ptr out of the logic below. - */ - lock_ptr = READ_ONCE(q->lock_ptr); - if (lock_ptr != NULL) { -- raw_spin_lock(lock_ptr); -+ spin_lock(lock_ptr); - /* - * q->lock_ptr can change between reading it and -- * raw_spin_lock(), causing us to take the wrong lock. This -+ * spin_lock(), causing us to take the wrong lock. This - * corrects the race condition. - * - * Reasoning goes like this: if we have the wrong lock, - * q->lock_ptr must have changed (maybe several times) -- * between reading it and the raw_spin_lock(). It can -- * change again after the raw_spin_lock() but only if it was -- * already changed before the raw_spin_lock(). It cannot, -+ * between reading it and the spin_lock(). It can -+ * change again after the spin_lock() but only if it was -+ * already changed before the spin_lock(). It cannot, - * however, change back to the original value. Therefore - * we can detect whether we acquired the correct lock. - */ - if (unlikely(lock_ptr != q->lock_ptr)) { -- raw_spin_unlock(lock_ptr); -+ spin_unlock(lock_ptr); - goto retry; - } - __unqueue_futex(q); - - BUG_ON(q->pi_state); - -- raw_spin_unlock(lock_ptr); -+ spin_unlock(lock_ptr); - ret = 1; - } - -@@ -2361,16 +2346,13 @@ static int unqueue_me(struct futex_q *q) - static void unqueue_me_pi(struct futex_q *q) - __releases(q->lock_ptr) - { -- struct futex_pi_state *ps; -- - __unqueue_futex(q); - - BUG_ON(!q->pi_state); -- ps = __put_pi_state(q->pi_state); -+ put_pi_state(q->pi_state); - q->pi_state = NULL; - -- raw_spin_unlock(q->lock_ptr); -- kfree(ps); -+ spin_unlock(q->lock_ptr); - } - - static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, -@@ -2503,7 +2485,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, - */ - handle_err: - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); -- raw_spin_unlock(q->lock_ptr); -+ spin_unlock(q->lock_ptr); - - switch (err) { - case -EFAULT: -@@ -2521,7 +2503,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, - break; - } - -- raw_spin_lock(q->lock_ptr); -+ spin_lock(q->lock_ptr); - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - - /* -@@ -2617,7 +2599,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, - /* - * The task state is guaranteed to be set before another task can - * wake it. set_current_state() is implemented using smp_store_mb() and -- * queue_me() calls raw_spin_unlock() upon completion, both serializing -+ * queue_me() calls spin_unlock() upon completion, both serializing - * access to the hash list and forcing another memory barrier. - */ - set_current_state(TASK_INTERRUPTIBLE); -@@ -2908,7 +2890,15 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - * before __rt_mutex_start_proxy_lock() is done. - */ - raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); -- raw_spin_unlock(q.lock_ptr); -+ /* -+ * the migrate_disable() here disables migration in the in_atomic() fast -+ * path which is enabled again in the following spin_unlock(). We have -+ * one migrate_disable() pending in the slow-path which is reversed -+ * after the raw_spin_unlock_irq() where we leave the atomic context. 
-+ */ -+ migrate_disable(); -+ -+ spin_unlock(q.lock_ptr); - /* - * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter - * such that futex_unlock_pi() is guaranteed to observe the waiter when -@@ -2916,6 +2906,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - */ - ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); - raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); -+ migrate_enable(); - - if (ret) { - if (ret == 1) -@@ -2929,7 +2920,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, - ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); - - cleanup: -- raw_spin_lock(q.lock_ptr); -+ spin_lock(q.lock_ptr); - /* - * If we failed to acquire the lock (deadlock/signal/timeout), we must - * first acquire the hb->lock before removing the lock from the -@@ -3030,7 +3021,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) - return ret; - - hb = hash_futex(&key); -- raw_spin_lock(&hb->lock); -+ spin_lock(&hb->lock); - - /* - * Check waiters first. We do not trust user space values at -@@ -3064,10 +3055,19 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) - * rt_waiter. Also see the WARN in wake_futex_pi(). - */ - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); -- raw_spin_unlock(&hb->lock); -+ /* -+ * Magic trickery for now to make the RT migrate disable -+ * logic happy. The following spin_unlock() happens with -+ * interrupts disabled so the internal migrate_enable() -+ * won't undo the migrate_disable() which was issued when -+ * locking hb->lock. -+ */ -+ migrate_disable(); -+ spin_unlock(&hb->lock); - - /* drops pi_state->pi_mutex.wait_lock */ - ret = wake_futex_pi(uaddr, uval, pi_state); -+ migrate_enable(); - - put_pi_state(pi_state); - -@@ -3103,7 +3103,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) - * owner. - */ - if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) { -- raw_spin_unlock(&hb->lock); -+ spin_unlock(&hb->lock); - switch (ret) { - case -EFAULT: - goto pi_faulted; -@@ -3123,7 +3123,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) - ret = (curval == uval) ? 0 : -EAGAIN; - - out_unlock: -- raw_spin_unlock(&hb->lock); -+ spin_unlock(&hb->lock); - out_putkey: - put_futex_key(&key); - return ret; -@@ -3239,7 +3239,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - struct hrtimer_sleeper timeout, *to = NULL; - struct futex_pi_state *pi_state = NULL; - struct rt_mutex_waiter rt_waiter; -- struct futex_hash_bucket *hb; -+ struct futex_hash_bucket *hb, *hb2; - union futex_key key2 = FUTEX_KEY_INIT; - struct futex_q q = futex_q_init; - int res, ret; -@@ -3297,20 +3297,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - /* Queue the futex_q, drop the hb lock, wait for wakeup. */ - futex_wait_queue_me(hb, &q, to); - -- raw_spin_lock(&hb->lock); -- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); -- raw_spin_unlock(&hb->lock); -- if (ret) -- goto out_put_keys; -+ /* -+ * On RT we must avoid races with requeue and trying to block -+ * on two mutexes (hb->lock and uaddr2's rtmutex) by -+ * serializing access to pi_blocked_on with pi_lock. -+ */ -+ raw_spin_lock_irq(¤t->pi_lock); -+ if (current->pi_blocked_on) { -+ /* -+ * We have been requeued or are in the process of -+ * being requeued. 
-+ */ -+ raw_spin_unlock_irq(¤t->pi_lock); -+ } else { -+ /* -+ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS -+ * prevents a concurrent requeue from moving us to the -+ * uaddr2 rtmutex. After that we can safely acquire -+ * (and possibly block on) hb->lock. -+ */ -+ current->pi_blocked_on = PI_WAKEUP_INPROGRESS; -+ raw_spin_unlock_irq(¤t->pi_lock); -+ -+ spin_lock(&hb->lock); -+ -+ /* -+ * Clean up pi_blocked_on. We might leak it otherwise -+ * when we succeeded with the hb->lock in the fast -+ * path. -+ */ -+ raw_spin_lock_irq(¤t->pi_lock); -+ current->pi_blocked_on = NULL; -+ raw_spin_unlock_irq(¤t->pi_lock); -+ -+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); -+ spin_unlock(&hb->lock); -+ if (ret) -+ goto out_put_keys; -+ } - - /* -- * In order for us to be here, we know our q.key == key2, and since -- * we took the hb->lock above, we also know that futex_requeue() has -- * completed and we no longer have to concern ourselves with a wakeup -- * race with the atomic proxy lock acquisition by the requeue code. The -- * futex_requeue dropped our key1 reference and incremented our key2 -- * reference count. -+ * In order to be here, we have either been requeued, are in -+ * the process of being requeued, or requeue successfully -+ * acquired uaddr2 on our behalf. If pi_blocked_on was -+ * non-null above, we may be racing with a requeue. Do not -+ * rely on q->lock_ptr to be hb2->lock until after blocking on -+ * hb->lock or hb2->lock. The futex_requeue dropped our key1 -+ * reference and incremented our key2 reference count. - */ -+ hb2 = hash_futex(&key2); - - /* Check if the requeue code acquired the second futex for us. */ - if (!q.rt_waiter) { -@@ -3319,9 +3354,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * did a lock-steal - fix up the PI-state in that case. - */ - if (q.pi_state && (q.pi_state->owner != current)) { -- struct futex_pi_state *ps_free; -- -- raw_spin_lock(q.lock_ptr); -+ spin_lock(&hb2->lock); -+ BUG_ON(&hb2->lock != q.lock_ptr); - ret = fixup_pi_state_owner(uaddr2, &q, current); - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { - pi_state = q.pi_state; -@@ -3331,9 +3365,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - * Drop the reference to the pi state which - * the requeue_pi() code acquired for us. 
- */ -- ps_free = __put_pi_state(q.pi_state); -- raw_spin_unlock(q.lock_ptr); -- kfree(ps_free); -+ put_pi_state(q.pi_state); -+ spin_unlock(&hb2->lock); - } - } else { - struct rt_mutex *pi_mutex; -@@ -3347,7 +3380,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, - pi_mutex = &q.pi_state->pi_mutex; - ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); - -- raw_spin_lock(q.lock_ptr); -+ spin_lock(&hb2->lock); -+ BUG_ON(&hb2->lock != q.lock_ptr); - if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) - ret = 0; - -@@ -4014,7 +4048,7 @@ static int __init futex_init(void) - for (i = 0; i < futex_hashsize; i++) { - atomic_set(&futex_queues[i].waiters, 0); - plist_head_init(&futex_queues[i].chain); -- raw_spin_lock_init(&futex_queues[i].lock); -+ spin_lock_init(&futex_queues[i].lock); - } - - return 0; -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 44a33057a83a..2a9bf2443acc 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -142,6 +142,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) - WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); - } - -+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) -+{ -+ return waiter && waiter != PI_WAKEUP_INPROGRESS && -+ waiter != PI_REQUEUE_INPROGRESS; -+} -+ - /* - * We can speed up the acquire/release, if there's no debugging state to be - * set up. -@@ -415,7 +421,8 @@ int max_lock_depth = 1024; - - static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) - { -- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; -+ return rt_mutex_real_waiter(p->pi_blocked_on) ? -+ p->pi_blocked_on->lock : NULL; - } - - /* -@@ -551,7 +558,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, - * reached or the state of the chain has changed while we - * dropped the locks. - */ -- if (!waiter) -+ if (!rt_mutex_real_waiter(waiter)) - goto out_unlock_pi; - - /* -@@ -1321,6 +1328,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - return -EDEADLK; - - raw_spin_lock(&task->pi_lock); -+ /* -+ * In the case of futex requeue PI, this will be a proxy -+ * lock. The task will wake unaware that it is enqueueed on -+ * this lock. Avoid blocking on two locks and corrupting -+ * pi_blocked_on via the PI_WAKEUP_INPROGRESS -+ * flag. futex_wait_requeue_pi() sets this when it wakes up -+ * before requeue (due to a signal or timeout). Do not enqueue -+ * the task if PI_WAKEUP_INPROGRESS is set. 
-+ */ -+ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { -+ raw_spin_unlock(&task->pi_lock); -+ return -EAGAIN; -+ } -+ -+ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); -+ - waiter->task = task; - waiter->lock = lock; - waiter->prio = task->prio; -@@ -1344,7 +1367,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - rt_mutex_enqueue_pi(owner, waiter); - - rt_mutex_adjust_prio(owner); -- if (owner->pi_blocked_on) -+ if (rt_mutex_real_waiter(owner->pi_blocked_on)) - chain_walk = 1; - } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { - chain_walk = 1; -@@ -1444,7 +1467,7 @@ static void remove_waiter(struct rt_mutex *lock, - { - bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); - struct task_struct *owner = rt_mutex_owner(lock); -- struct rt_mutex *next_lock; -+ struct rt_mutex *next_lock = NULL; - - lockdep_assert_held(&lock->wait_lock); - -@@ -1470,7 +1493,8 @@ static void remove_waiter(struct rt_mutex *lock, - rt_mutex_adjust_prio(owner); - - /* Store the lock on which owner is blocked or NULL */ -- next_lock = task_blocked_on_lock(owner); -+ if (rt_mutex_real_waiter(owner->pi_blocked_on)) -+ next_lock = task_blocked_on_lock(owner); - - raw_spin_unlock(&owner->pi_lock); - -@@ -1506,7 +1530,8 @@ void rt_mutex_adjust_pi(struct task_struct *task) - raw_spin_lock_irqsave(&task->pi_lock, flags); - - waiter = task->pi_blocked_on; -- if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { -+ if (!rt_mutex_real_waiter(waiter) || -+ rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { - raw_spin_unlock_irqrestore(&task->pi_lock, flags); - return; - } -@@ -2325,6 +2350,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, - if (try_to_take_rt_mutex(lock, task, NULL)) - return 1; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * In PREEMPT_RT there's an added race. -+ * If the task, that we are about to requeue, times out, -+ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue -+ * to skip this task. But right after the task sets -+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then -+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex. -+ * This will replace the PI_WAKEUP_INPROGRESS with the actual -+ * lock that it blocks on. We *must not* place this task -+ * on this proxy lock in that case. -+ * -+ * To prevent this race, we first take the task's pi_lock -+ * and check if it has updated its pi_blocked_on. If it has, -+ * we assume that it woke up and we return -EAGAIN. -+ * Otherwise, we set the task's pi_blocked_on to -+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up -+ * it will know that we are in the process of requeuing it. 
-+ */ -+ raw_spin_lock(&task->pi_lock); -+ if (task->pi_blocked_on) { -+ raw_spin_unlock(&task->pi_lock); -+ return -EAGAIN; -+ } -+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS; -+ raw_spin_unlock(&task->pi_lock); -+#endif -+ - /* We enforce deadlock detection for futexes */ - ret = task_blocks_on_rt_mutex(lock, waiter, task, - RT_MUTEX_FULL_CHAINWALK); -diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h -index 758dc43872e5..546aaf058b9e 100644 ---- a/kernel/locking/rtmutex_common.h -+++ b/kernel/locking/rtmutex_common.h -@@ -132,6 +132,9 @@ enum rtmutex_chainwalk { - /* - * PI-futex support (proxy locking functions, etc.): - */ -+#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) -+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) -+ - extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); - extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0308-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch b/kernel/patches-4.19.x-rt/0308-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch deleted file mode 100644 index 68126d7f0..000000000 --- a/kernel/patches-4.19.x-rt/0308-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch +++ /dev/null @@ -1,101 +0,0 @@ -From 7a083b11dea5905627872e0986c532a3e11716cf Mon Sep 17 00:00:00 2001 -From: Peter Zijlstra -Date: Mon, 30 Sep 2019 18:15:44 +0200 -Subject: [PATCH 308/328] locking/rtmutex: Clean ->pi_blocked_on in the error - case - -[ Upstream commit 0be4ea6e3ce693101be0fbd55a0cc7ce238ab2eb ] - -The function rt_mutex_wait_proxy_lock() cleans ->pi_blocked_on in case -of failure (timeout, signal). The same cleanup is required in -__rt_mutex_start_proxy_lock(). -In both the cases the tasks was interrupted by a signal or timeout while -acquiring the lock and after the interruption it longer blocks on the -lock. - -Fixes: 1a1fb985f2e2b ("futex: Handle early deadlock return correctly") -Signed-off-by: Peter Zijlstra (Intel) -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/locking/rtmutex.c | 43 +++++++++++++++++++++++----------------- - 1 file changed, 25 insertions(+), 18 deletions(-) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 2a9bf2443acc..63b3d6f306fa 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -2320,6 +2320,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock, - rt_mutex_set_owner(lock, NULL); - } - -+static void fixup_rt_mutex_blocked(struct rt_mutex *lock) -+{ -+ struct task_struct *tsk = current; -+ /* -+ * RT has a problem here when the wait got interrupted by a timeout -+ * or a signal. task->pi_blocked_on is still set. The task must -+ * acquire the hash bucket lock when returning from this function. -+ * -+ * If the hash bucket lock is contended then the -+ * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in -+ * task_blocks_on_rt_mutex() will trigger. This can be avoided by -+ * clearing task->pi_blocked_on which removes the task from the -+ * boosting chain of the rtmutex. That's correct because the task -+ * is not longer blocked on it. 
-+ */ -+ raw_spin_lock(&tsk->pi_lock); -+ tsk->pi_blocked_on = NULL; -+ raw_spin_unlock(&tsk->pi_lock); -+} -+ - /** - * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task - * @lock: the rt_mutex to take -@@ -2392,6 +2412,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, - ret = 0; - } - -+ if (ret) -+ fixup_rt_mutex_blocked(lock); -+ - debug_rt_mutex_print_deadlock(waiter); - - return ret; -@@ -2472,7 +2495,6 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *to, - struct rt_mutex_waiter *waiter) - { -- struct task_struct *tsk = current; - int ret; - - raw_spin_lock_irq(&lock->wait_lock); -@@ -2484,23 +2506,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, - * have to fix that up. - */ - fixup_rt_mutex_waiters(lock); -- /* -- * RT has a problem here when the wait got interrupted by a timeout -- * or a signal. task->pi_blocked_on is still set. The task must -- * acquire the hash bucket lock when returning from this function. -- * -- * If the hash bucket lock is contended then the -- * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in -- * task_blocks_on_rt_mutex() will trigger. This can be avoided by -- * clearing task->pi_blocked_on which removes the task from the -- * boosting chain of the rtmutex. That's correct because the task -- * is not longer blocked on it. -- */ -- if (ret) { -- raw_spin_lock(&tsk->pi_lock); -- tsk->pi_blocked_on = NULL; -- raw_spin_unlock(&tsk->pi_lock); -- } -+ if (ret) -+ fixup_rt_mutex_blocked(lock); - - raw_spin_unlock_irq(&lock->wait_lock); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0309-lib-ubsan-Don-t-seralize-UBSAN-report.patch b/kernel/patches-4.19.x-rt/0309-lib-ubsan-Don-t-seralize-UBSAN-report.patch deleted file mode 100644 index d5e5d9a1d..000000000 --- a/kernel/patches-4.19.x-rt/0309-lib-ubsan-Don-t-seralize-UBSAN-report.patch +++ /dev/null @@ -1,307 +0,0 @@ -From 63329ddc5674e1b5c0837ed4a5db590246ba0835 Mon Sep 17 00:00:00 2001 -From: Julien Grall -Date: Fri, 20 Sep 2019 11:08:35 +0100 -Subject: [PATCH 309/328] lib/ubsan: Don't seralize UBSAN report - -[ Upstream commit 4702c28ac777b27acb499cbd5e8e787ce1a7d82d ] - -At the moment, UBSAN report will be serialized using a spin_lock(). On -RT-systems, spinlocks are turned to rt_spin_lock and may sleep. 
This will -result to the following splat if the undefined behavior is in a context -that can sleep: - -| BUG: sleeping function called from invalid context at /src/linux/kernel/locking/rtmutex.c:968 -| in_atomic(): 1, irqs_disabled(): 128, pid: 3447, name: make -| 1 lock held by make/3447: -| #0: 000000009a966332 (&mm->mmap_sem){++++}, at: do_page_fault+0x140/0x4f8 -| Preemption disabled at: -| [] rt_mutex_futex_unlock+0x4c/0xb0 -| CPU: 3 PID: 3447 Comm: make Tainted: G W 5.2.14-rt7-01890-ge6e057589653 #911 -| Call trace: -| dump_backtrace+0x0/0x148 -| show_stack+0x14/0x20 -| dump_stack+0xbc/0x104 -| ___might_sleep+0x154/0x210 -| rt_spin_lock+0x68/0xa0 -| ubsan_prologue+0x30/0x68 -| handle_overflow+0x64/0xe0 -| __ubsan_handle_add_overflow+0x10/0x18 -| __lock_acquire+0x1c28/0x2a28 -| lock_acquire+0xf0/0x370 -| _raw_spin_lock_irqsave+0x58/0x78 -| rt_mutex_futex_unlock+0x4c/0xb0 -| rt_spin_unlock+0x28/0x70 -| get_page_from_freelist+0x428/0x2b60 -| __alloc_pages_nodemask+0x174/0x1708 -| alloc_pages_vma+0x1ac/0x238 -| __handle_mm_fault+0x4ac/0x10b0 -| handle_mm_fault+0x1d8/0x3b0 -| do_page_fault+0x1c8/0x4f8 -| do_translation_fault+0xb8/0xe0 -| do_mem_abort+0x3c/0x98 -| el0_da+0x20/0x24 - -The spin_lock() will protect against multiple CPUs to output a report -together, I guess to prevent them to be interleaved. However, they can -still interleave with other messages (and even splat from __migth_sleep). - -So the lock usefulness seems pretty limited. Rather than trying to -accomodate RT-system by switching to a raw_spin_lock(), the lock is now -completely dropped. - -Link: https://lkml.kernel.org/r/20190920100835.14999-1-julien.grall@arm.com -Reported-by: Andre Przywara -Signed-off-by: Julien Grall -Acked-by: Andrey Ryabinin -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - lib/ubsan.c | 64 +++++++++++++++++++---------------------------------- - 1 file changed, 23 insertions(+), 41 deletions(-) - -diff --git a/lib/ubsan.c b/lib/ubsan.c -index 1e9e2ab25539..5830cc9a2164 100644 ---- a/lib/ubsan.c -+++ b/lib/ubsan.c -@@ -143,25 +143,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type, - } - } - --static DEFINE_SPINLOCK(report_lock); -- --static void ubsan_prologue(struct source_location *location, -- unsigned long *flags) -+static void ubsan_prologue(struct source_location *location) - { - current->in_ubsan++; -- spin_lock_irqsave(&report_lock, *flags); - - pr_err("========================================" - "========================================\n"); - print_source_location("UBSAN: Undefined behaviour in", location); - } - --static void ubsan_epilogue(unsigned long *flags) -+static void ubsan_epilogue(void) - { - dump_stack(); - pr_err("========================================" - "========================================\n"); -- spin_unlock_irqrestore(&report_lock, *flags); -+ - current->in_ubsan--; - } - -@@ -170,14 +166,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs, - { - - struct type_descriptor *type = data->type; -- unsigned long flags; - char lhs_val_str[VALUE_LENGTH]; - char rhs_val_str[VALUE_LENGTH]; - - if (suppress_report(&data->location)) - return; - -- ubsan_prologue(&data->location, &flags); -+ ubsan_prologue(&data->location); - - val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); - val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); -@@ -189,7 +184,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs, - rhs_val_str, - type->type_name); - -- 
ubsan_epilogue(&flags); -+ ubsan_epilogue(); - } - - void __ubsan_handle_add_overflow(struct overflow_data *data, -@@ -217,20 +212,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow); - void __ubsan_handle_negate_overflow(struct overflow_data *data, - void *old_val) - { -- unsigned long flags; - char old_val_str[VALUE_LENGTH]; - - if (suppress_report(&data->location)) - return; - -- ubsan_prologue(&data->location, &flags); -+ ubsan_prologue(&data->location); - - val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); - - pr_err("negation of %s cannot be represented in type %s:\n", - old_val_str, data->type->type_name); - -- ubsan_epilogue(&flags); -+ ubsan_epilogue(); - } - EXPORT_SYMBOL(__ubsan_handle_negate_overflow); - -@@ -238,13 +232,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow); - void __ubsan_handle_divrem_overflow(struct overflow_data *data, - void *lhs, void *rhs) - { -- unsigned long flags; - char rhs_val_str[VALUE_LENGTH]; - - if (suppress_report(&data->location)) - return; - -- ubsan_prologue(&data->location, &flags); -+ ubsan_prologue(&data->location); - - val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); - -@@ -254,58 +247,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, - else - pr_err("division by zero\n"); - -- ubsan_epilogue(&flags); -+ ubsan_epilogue(); - } - EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); - - static void handle_null_ptr_deref(struct type_mismatch_data_common *data) - { -- unsigned long flags; -- - if (suppress_report(data->location)) - return; - -- ubsan_prologue(data->location, &flags); -+ ubsan_prologue(data->location); - - pr_err("%s null pointer of type %s\n", - type_check_kinds[data->type_check_kind], - data->type->type_name); - -- ubsan_epilogue(&flags); -+ ubsan_epilogue(); - } - - static void handle_misaligned_access(struct type_mismatch_data_common *data, - unsigned long ptr) - { -- unsigned long flags; -- - if (suppress_report(data->location)) - return; - -- ubsan_prologue(data->location, &flags); -+ ubsan_prologue(data->location); - - pr_err("%s misaligned address %p for type %s\n", - type_check_kinds[data->type_check_kind], - (void *)ptr, data->type->type_name); - pr_err("which requires %ld byte alignment\n", data->alignment); - -- ubsan_epilogue(&flags); -+ ubsan_epilogue(); - } - - static void handle_object_size_mismatch(struct type_mismatch_data_common *data, - unsigned long ptr) - { -- unsigned long flags; -- - if (suppress_report(data->location)) - return; - -- ubsan_prologue(data->location, &flags); -+ ubsan_prologue(data->location); - pr_err("%s address %p with insufficient space\n", - type_check_kinds[data->type_check_kind], - (void *) ptr); - pr_err("for an object of type %s\n", data->type->type_name); -- ubsan_epilogue(&flags); -+ ubsan_epilogue(); - } - - static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, -@@ -369,25 +356,23 @@ EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); - - void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) - { -- unsigned long flags; - char index_str[VALUE_LENGTH]; - - if (suppress_report(&data->location)) - return; - -- ubsan_prologue(&data->location, &flags); -+ ubsan_prologue(&data->location); - - val_to_string(index_str, sizeof(index_str), data->index_type, index); - pr_err("index %s is out of range for type %s\n", index_str, - data->array_type->type_name); -- ubsan_epilogue(&flags); -+ ubsan_epilogue(); - } - EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); - - void 
__ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, - void *lhs, void *rhs) - { -- unsigned long flags; - struct type_descriptor *rhs_type = data->rhs_type; - struct type_descriptor *lhs_type = data->lhs_type; - char rhs_str[VALUE_LENGTH]; -@@ -396,7 +381,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, - if (suppress_report(&data->location)) - return; - -- ubsan_prologue(&data->location, &flags); -+ ubsan_prologue(&data->location); - - val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); - val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); -@@ -419,18 +404,16 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, - lhs_str, rhs_str, - lhs_type->type_name); - -- ubsan_epilogue(&flags); -+ ubsan_epilogue(); - } - EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); - - - void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) - { -- unsigned long flags; -- -- ubsan_prologue(&data->location, &flags); -+ ubsan_prologue(&data->location); - pr_err("calling __builtin_unreachable()\n"); -- ubsan_epilogue(&flags); -+ ubsan_epilogue(); - panic("can't return from __builtin_unreachable()"); - } - EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); -@@ -438,19 +421,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); - void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, - void *val) - { -- unsigned long flags; - char val_str[VALUE_LENGTH]; - - if (suppress_report(&data->location)) - return; - -- ubsan_prologue(&data->location, &flags); -+ ubsan_prologue(&data->location); - - val_to_string(val_str, sizeof(val_str), data->type, val); - - pr_err("load of value %s is not a valid value for type %s\n", - val_str, data->type->type_name); - -- ubsan_epilogue(&flags); -+ ubsan_epilogue(); - } - EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0310-kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch b/kernel/patches-4.19.x-rt/0310-kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch deleted file mode 100644 index 73aee28b0..000000000 --- a/kernel/patches-4.19.x-rt/0310-kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch +++ /dev/null @@ -1,292 +0,0 @@ -From fab900cc4385877aa0753a74950d86a8e88a9cec Mon Sep 17 00:00:00 2001 -From: Liu Haitao -Date: Fri, 27 Sep 2019 16:22:30 +0800 -Subject: [PATCH 310/328] kmemleak: Change the lock of kmemleak_object to - raw_spinlock_t - -[ Upstream commit 217847f57119b5fdd377bfa3d344613ddb98d9fc ] - -The commit ("kmemleak: Turn kmemleak_lock to raw spinlock on RT") -changed the kmemleak_lock to raw spinlock. However the -kmemleak_object->lock is held after the kmemleak_lock is held in -scan_block(). - -Make the object->lock a raw_spinlock_t. - -Cc: stable-rt@vger.kernel.org -Link: https://lkml.kernel.org/r/20190927082230.34152-1-yongxin.liu@windriver.com -Signed-off-by: Liu Haitao -Signed-off-by: Yongxin Liu -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - mm/kmemleak.c | 72 +++++++++++++++++++++++++-------------------------- - 1 file changed, 36 insertions(+), 36 deletions(-) - -diff --git a/mm/kmemleak.c b/mm/kmemleak.c -index 92ce99b15f2b..e5f5eeed338d 100644 ---- a/mm/kmemleak.c -+++ b/mm/kmemleak.c -@@ -147,7 +147,7 @@ struct kmemleak_scan_area { - * (use_count) and freed using the RCU mechanism. 
- */ - struct kmemleak_object { -- spinlock_t lock; -+ raw_spinlock_t lock; - unsigned int flags; /* object status flags */ - struct list_head object_list; - struct list_head gray_list; -@@ -561,7 +561,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, - INIT_LIST_HEAD(&object->object_list); - INIT_LIST_HEAD(&object->gray_list); - INIT_HLIST_HEAD(&object->area_list); -- spin_lock_init(&object->lock); -+ raw_spin_lock_init(&object->lock); - atomic_set(&object->use_count, 1); - object->flags = OBJECT_ALLOCATED; - object->pointer = ptr; -@@ -642,9 +642,9 @@ static void __delete_object(struct kmemleak_object *object) - * Locking here also ensures that the corresponding memory block - * cannot be freed when it is being scanned. - */ -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - object->flags &= ~OBJECT_ALLOCATED; -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - put_object(object); - } - -@@ -716,9 +716,9 @@ static void paint_it(struct kmemleak_object *object, int color) - { - unsigned long flags; - -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - __paint_it(object, color); -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - } - - static void paint_ptr(unsigned long ptr, int color) -@@ -778,7 +778,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) - goto out; - } - -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - if (size == SIZE_MAX) { - size = object->pointer + object->size - ptr; - } else if (ptr + size > object->pointer + object->size) { -@@ -794,7 +794,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) - - hlist_add_head(&area->node, &object->area_list); - out_unlock: -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - out: - put_object(object); - } -@@ -817,9 +817,9 @@ static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref) - return; - } - -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - object->excess_ref = excess_ref; -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - put_object(object); - } - -@@ -839,9 +839,9 @@ static void object_no_scan(unsigned long ptr) - return; - } - -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - object->flags |= OBJECT_NO_SCAN; -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - put_object(object); - } - -@@ -902,11 +902,11 @@ static void early_alloc(struct early_log *log) - log->min_count, GFP_ATOMIC); - if (!object) - goto out; -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - for (i = 0; i < log->trace_len; i++) - object->trace[i] = log->trace[i]; - object->trace_len = log->trace_len; -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - out: - rcu_read_unlock(); - } -@@ -1096,9 +1096,9 @@ void __ref kmemleak_update_trace(const void *ptr) - return; - } - -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - object->trace_len = __save_stack_trace(object->trace); -- spin_unlock_irqrestore(&object->lock, flags); -+ 
raw_spin_unlock_irqrestore(&object->lock, flags); - - put_object(object); - } -@@ -1344,7 +1344,7 @@ static void scan_block(void *_start, void *_end, - * previously acquired in scan_object(). These locks are - * enclosed by scan_mutex. - */ -- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); -+ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); - /* only pass surplus references (object already gray) */ - if (color_gray(object)) { - excess_ref = object->excess_ref; -@@ -1353,7 +1353,7 @@ static void scan_block(void *_start, void *_end, - excess_ref = 0; - update_refs(object); - } -- spin_unlock(&object->lock); -+ raw_spin_unlock(&object->lock); - - if (excess_ref) { - object = lookup_object(excess_ref, 0); -@@ -1362,9 +1362,9 @@ static void scan_block(void *_start, void *_end, - if (object == scanned) - /* circular reference, ignore */ - continue; -- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); -+ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING); - update_refs(object); -- spin_unlock(&object->lock); -+ raw_spin_unlock(&object->lock); - } - } - raw_spin_unlock_irqrestore(&kmemleak_lock, flags); -@@ -1400,7 +1400,7 @@ static void scan_object(struct kmemleak_object *object) - * Once the object->lock is acquired, the corresponding memory block - * cannot be freed (the same lock is acquired in delete_object). - */ -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - if (object->flags & OBJECT_NO_SCAN) - goto out; - if (!(object->flags & OBJECT_ALLOCATED)) -@@ -1419,9 +1419,9 @@ static void scan_object(struct kmemleak_object *object) - if (start >= end) - break; - -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - cond_resched(); -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - } while (object->flags & OBJECT_ALLOCATED); - } else - hlist_for_each_entry(area, &object->area_list, node) -@@ -1429,7 +1429,7 @@ static void scan_object(struct kmemleak_object *object) - (void *)(area->start + area->size), - object); - out: -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - } - - /* -@@ -1482,7 +1482,7 @@ static void kmemleak_scan(void) - /* prepare the kmemleak_object's */ - rcu_read_lock(); - list_for_each_entry_rcu(object, &object_list, object_list) { -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - #ifdef DEBUG - /* - * With a few exceptions there should be a maximum of -@@ -1499,7 +1499,7 @@ static void kmemleak_scan(void) - if (color_gray(object) && get_object(object)) - list_add_tail(&object->gray_list, &gray_list); - -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - } - rcu_read_unlock(); - -@@ -1564,14 +1564,14 @@ static void kmemleak_scan(void) - */ - rcu_read_lock(); - list_for_each_entry_rcu(object, &object_list, object_list) { -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - if (color_white(object) && (object->flags & OBJECT_ALLOCATED) - && update_checksum(object) && get_object(object)) { - /* color it gray temporarily */ - object->count = object->min_count; - list_add_tail(&object->gray_list, &gray_list); - } -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - } - rcu_read_unlock(); - -@@ -1591,13 +1591,13 @@ static void kmemleak_scan(void) - */ - rcu_read_lock(); - 
list_for_each_entry_rcu(object, &object_list, object_list) { -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - if (unreferenced_object(object) && - !(object->flags & OBJECT_REPORTED)) { - object->flags |= OBJECT_REPORTED; - new_leaks++; - } -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - } - rcu_read_unlock(); - -@@ -1749,10 +1749,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v) - struct kmemleak_object *object = v; - unsigned long flags; - -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) - print_unreferenced(seq, object); -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - return 0; - } - -@@ -1782,9 +1782,9 @@ static int dump_str_object_info(const char *str) - return -EINVAL; - } - -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - dump_object_info(object); -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - - put_object(object); - return 0; -@@ -1803,11 +1803,11 @@ static void kmemleak_clear(void) - - rcu_read_lock(); - list_for_each_entry_rcu(object, &object_list, object_list) { -- spin_lock_irqsave(&object->lock, flags); -+ raw_spin_lock_irqsave(&object->lock, flags); - if ((object->flags & OBJECT_REPORTED) && - unreferenced_object(object)) - __paint_it(object, KMEMLEAK_GREY); -- spin_unlock_irqrestore(&object->lock, flags); -+ raw_spin_unlock_irqrestore(&object->lock, flags); - } - rcu_read_unlock(); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0311-sched-migrate_enable-Use-select_fallback_rq.patch b/kernel/patches-4.19.x-rt/0311-sched-migrate_enable-Use-select_fallback_rq.patch deleted file mode 100644 index a67ae7c62..000000000 --- a/kernel/patches-4.19.x-rt/0311-sched-migrate_enable-Use-select_fallback_rq.patch +++ /dev/null @@ -1,65 +0,0 @@ -From 84ff174f45e1a55c38d87b15d8d35cccc0bc9d93 Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Sat, 12 Oct 2019 01:52:12 -0500 -Subject: [PATCH 311/328] sched: migrate_enable: Use select_fallback_rq() - -[ Upstream commit adfa969d4cfcc995a9d866020124e50f1827d2d1 ] - -migrate_enable() currently open-codes a variant of select_fallback_rq(). -However, it does not have the "No more Mr. Nice Guy" fallback and thus -it will pass an invalid CPU to the migration thread if cpus_mask only -contains a CPU that is !active. 
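What the fallback buys is that destination selection always degrades to some active CPU: first intersect the task's allowed mask with the active mask, and only if that comes up empty pick any active CPU at all. A compact stand-alone sketch of that selection order, using plain bitmasks instead of cpumask_t (illustrative only, not the kernel helper):

    #include <stdint.h>
    #include <stdio.h>

    /* Pick a destination CPU: prefer allowed && active, otherwise any
     * active CPU ("no more Mr. Nice Guy"), otherwise report failure. */
    static int select_fallback_cpu(uint32_t allowed, uint32_t active)
    {
            uint32_t both = allowed & active;

            if (both)
                    return __builtin_ctz(both);
            if (active)
                    return __builtin_ctz(active);
            return -1;
    }

    int main(void)
    {
            uint32_t active  = 0x0F;   /* CPUs 0-3 are online and active */
            uint32_t allowed = 0x10;   /* task only allows CPU 4, which is !active */

            /* The open-coded variant would have handed CPU 4 to the migration
             * thread; the fallback picks an active CPU instead. */
            printf("dest_cpu = %d\n", select_fallback_cpu(allowed, active));
            return 0;
    }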
- -Signed-off-by: Scott Wood -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/sched/core.c | 25 ++++++++++--------------- - 1 file changed, 10 insertions(+), 15 deletions(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index d9a3f88508ee..6fd3f7b4d7d8 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7335,6 +7335,7 @@ void migrate_enable(void) - if (p->migrate_disable_update) { - struct rq *rq; - struct rq_flags rf; -+ int cpu = task_cpu(p); - - rq = task_rq_lock(p, &rf); - update_rq_clock(rq); -@@ -7344,21 +7345,15 @@ void migrate_enable(void) - - p->migrate_disable_update = 0; - -- WARN_ON(smp_processor_id() != task_cpu(p)); -- if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { -- const struct cpumask *cpu_valid_mask = cpu_active_mask; -- struct migration_arg arg; -- unsigned int dest_cpu; -- -- if (p->flags & PF_KTHREAD) { -- /* -- * Kernel threads are allowed on online && !active CPUs -- */ -- cpu_valid_mask = cpu_online_mask; -- } -- dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask); -- arg.task = p; -- arg.dest_cpu = dest_cpu; -+ WARN_ON(smp_processor_id() != cpu); -+ if (!cpumask_test_cpu(cpu, &p->cpus_mask)) { -+ struct migration_arg arg = { p }; -+ struct rq_flags rf; -+ -+ rq = task_rq_lock(p, &rf); -+ update_rq_clock(rq); -+ arg.dest_cpu = select_fallback_rq(cpu, p); -+ task_rq_unlock(rq, p, &rf); - - unpin_current_cpu(); - preempt_lazy_enable(); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0312-sched-Lazy-migrate_disable-processing.patch b/kernel/patches-4.19.x-rt/0312-sched-Lazy-migrate_disable-processing.patch deleted file mode 100644 index 5d0099e77..000000000 --- a/kernel/patches-4.19.x-rt/0312-sched-Lazy-migrate_disable-processing.patch +++ /dev/null @@ -1,615 +0,0 @@ -From 56caa7a47c76572041391ca1b27a68b637539aa3 Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Sat, 12 Oct 2019 01:52:13 -0500 -Subject: [PATCH 312/328] sched: Lazy migrate_disable processing - -[ Upstream commit 425c5b38779a860062aa62219dc920d374b13c17 ] - -Avoid overhead on the majority of migrate disable/enable sequences by -only manipulating scheduler data (and grabbing the relevant locks) when -the task actually schedules while migrate-disabled. A kernel build -showed around a 10% reduction in system time (with CONFIG_NR_CPUS=512). - -Instead of cpuhp_pin_lock, CPU hotplug is handled by keeping a per-CPU -count of the number of pinned tasks (including tasks which have not -scheduled in the migrate-disabled section); takedown_cpu() will -wait until that reaches zero (confirmed by take_cpu_down() in stop -machine context to deal with races) before migrating tasks off of the -cpu. - -To simplify synchronization, updating cpus_mask is no longer deferred -until migrate_enable(). This lets us not have to worry about -migrate_enable() missing the update if it's on the fast path (didn't -schedule during the migrate disabled section). It also makes the code -a bit simpler and reduces deviation from mainline. - -While the main motivation for this is the performance benefit, lazy -migrate disable also eliminates the restriction on calling -migrate_disable() while atomic but leaving the atomic region prior to -calling migrate_enable() -- though this won't help with local_bh_disable() -(and thus rcutorture) unless something similar is done with the recently -added local_lock. 
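The counting protocol described above is small enough to model in userspace. In the sketch below the names mirror the patch (nr_pinned, takedown_cpu), but a single modelled CPU and a plain mutex/condition variable stand in for the rq lock and the stop-machine re-check: migrate_disable() pins the current CPU, and migrate_enable() unpins it and wakes a waiting takedown thread once the count reaches zero.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* One modelled CPU's state (stand-in for rq->nr_pinned). */
static int nr_pinned;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t unpinned = PTHREAD_COND_INITIALIZER;

static void migrate_disable(void)
{
    pthread_mutex_lock(&lock);
    nr_pinned++;                       /* cheap: no scheduler locks taken */
    pthread_mutex_unlock(&lock);
}

static void migrate_enable(void)
{
    pthread_mutex_lock(&lock);
    if (--nr_pinned == 0)
        pthread_cond_signal(&unpinned);    /* wake a waiting takedown */
    pthread_mutex_unlock(&lock);
}

static void *takedown_cpu(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (nr_pinned)                  /* wait until nothing is pinned here */
        pthread_cond_wait(&unpinned, &lock);
    pthread_mutex_unlock(&lock);
    puts("cpu can go offline");
    return NULL;
}

int main(void)
{
    pthread_t t;

    migrate_disable();                        /* task pins the CPU       */
    pthread_create(&t, NULL, takedown_cpu, NULL);
    sleep(1);                                 /* hotplug is now blocked  */
    migrate_enable();                         /* count drops to zero     */
    pthread_join(t, NULL);
    return 0;
}

Built with cc -pthread, the takedown thread only proceeds once the pinned count hits zero, which is the property takedown_cpu() relies on; the real patch additionally re-checks the count from take_cpu_down() in stop-machine context to close the race this model ignores.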
- -Signed-off-by: Scott Wood -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - include/linux/cpu.h | 4 - - include/linux/sched.h | 11 +-- - init/init_task.c | 4 + - kernel/cpu.c | 103 +++++++++-------------- - kernel/sched/core.c | 182 +++++++++++++++++------------------------ - kernel/sched/sched.h | 4 + - lib/smp_processor_id.c | 3 + - 7 files changed, 129 insertions(+), 182 deletions(-) - -diff --git a/include/linux/cpu.h b/include/linux/cpu.h -index e67645924404..87347ccbba0c 100644 ---- a/include/linux/cpu.h -+++ b/include/linux/cpu.h -@@ -118,8 +118,6 @@ extern void cpu_hotplug_disable(void); - extern void cpu_hotplug_enable(void); - void clear_tasks_mm_cpumask(int cpu); - int cpu_down(unsigned int cpu); --extern void pin_current_cpu(void); --extern void unpin_current_cpu(void); - - #else /* CONFIG_HOTPLUG_CPU */ - -@@ -131,8 +129,6 @@ static inline int cpus_read_trylock(void) { return true; } - static inline void lockdep_assert_cpus_held(void) { } - static inline void cpu_hotplug_disable(void) { } - static inline void cpu_hotplug_enable(void) { } --static inline void pin_current_cpu(void) { } --static inline void unpin_current_cpu(void) { } - - #endif /* !CONFIG_HOTPLUG_CPU */ - -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 854a6cb456af..60ac271472aa 100644 ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -227,6 +227,8 @@ extern void io_schedule_finish(int token); - extern long io_schedule_timeout(long timeout); - extern void io_schedule(void); - -+int cpu_nr_pinned(int cpu); -+ - /** - * struct prev_cputime - snapshot of system and user cputime - * @utime: time spent in user mode -@@ -670,16 +672,13 @@ struct task_struct { - cpumask_t cpus_mask; - #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - int migrate_disable; -- int migrate_disable_update; -- int pinned_on_cpu; -+ bool migrate_disable_scheduled; - # ifdef CONFIG_SCHED_DEBUG -- int migrate_disable_atomic; -+ int pinned_on_cpu; - # endif -- - #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - # ifdef CONFIG_SCHED_DEBUG - int migrate_disable; -- int migrate_disable_atomic; - # endif - #endif - #ifdef CONFIG_PREEMPT_RT_FULL -@@ -2058,4 +2057,6 @@ static inline void rseq_syscall(struct pt_regs *regs) - - #endif - -+extern struct task_struct *takedown_cpu_task; -+ - #endif -diff --git a/init/init_task.c b/init/init_task.c -index 9e3362748214..4e5af4616dbd 100644 ---- a/init/init_task.c -+++ b/init/init_task.c -@@ -80,6 +80,10 @@ struct task_struct init_task - .cpus_ptr = &init_task.cpus_mask, - .cpus_mask = CPU_MASK_ALL, - .nr_cpus_allowed= NR_CPUS, -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) && \ -+ defined(CONFIG_SCHED_DEBUG) -+ .pinned_on_cpu = -1, -+#endif - .mm = NULL, - .active_mm = &init_mm, - .restart_block = { -diff --git a/kernel/cpu.c b/kernel/cpu.c -index 0d50ae262898..2834eed32b55 100644 ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -75,11 +75,6 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { - .fail = CPUHP_INVALID, - }; - --#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PREEMPT_RT_FULL) --static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \ -- __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock); --#endif -- - #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) - static struct lockdep_map cpuhp_state_up_map = - STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map); -@@ -286,57 +281,6 @@ static int cpu_hotplug_disabled; - - #ifdef CONFIG_HOTPLUG_CPU - --/** -- * pin_current_cpu - 
Prevent the current cpu from being unplugged -- */ --void pin_current_cpu(void) --{ --#ifdef CONFIG_PREEMPT_RT_FULL -- struct rt_rw_lock *cpuhp_pin; -- unsigned int cpu; -- int ret; -- --again: -- cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock); -- ret = __read_rt_trylock(cpuhp_pin); -- if (ret) { -- current->pinned_on_cpu = smp_processor_id(); -- return; -- } -- cpu = smp_processor_id(); -- preempt_lazy_enable(); -- preempt_enable(); -- -- sleeping_lock_inc(); -- __read_rt_lock(cpuhp_pin); -- sleeping_lock_dec(); -- -- preempt_disable(); -- preempt_lazy_disable(); -- if (cpu != smp_processor_id()) { -- __read_rt_unlock(cpuhp_pin); -- goto again; -- } -- current->pinned_on_cpu = cpu; --#endif --} -- --/** -- * unpin_current_cpu - Allow unplug of current cpu -- */ --void unpin_current_cpu(void) --{ --#ifdef CONFIG_PREEMPT_RT_FULL -- struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock); -- -- if (WARN_ON(current->pinned_on_cpu != smp_processor_id())) -- cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, current->pinned_on_cpu); -- -- current->pinned_on_cpu = -1; -- __read_rt_unlock(cpuhp_pin); --#endif --} -- - DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); - - void cpus_read_lock(void) -@@ -865,6 +809,15 @@ static int take_cpu_down(void *_param) - int err, cpu = smp_processor_id(); - int ret; - -+#ifdef CONFIG_PREEMPT_RT_BASE -+ /* -+ * If any tasks disabled migration before we got here, -+ * go back and sleep again. -+ */ -+ if (cpu_nr_pinned(cpu)) -+ return -EAGAIN; -+#endif -+ - /* Ensure this CPU doesn't handle any more interrupts. */ - err = __cpu_disable(); - if (err < 0) -@@ -892,11 +845,10 @@ static int take_cpu_down(void *_param) - return 0; - } - -+struct task_struct *takedown_cpu_task; -+ - static int takedown_cpu(unsigned int cpu) - { --#ifdef CONFIG_PREEMPT_RT_FULL -- struct rt_rw_lock *cpuhp_pin = per_cpu_ptr(&cpuhp_pin_lock, cpu); --#endif - struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); - int err; - -@@ -909,17 +861,38 @@ static int takedown_cpu(unsigned int cpu) - */ - irq_lock_sparse(); - --#ifdef CONFIG_PREEMPT_RT_FULL -- __write_rt_lock(cpuhp_pin); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ WARN_ON_ONCE(takedown_cpu_task); -+ takedown_cpu_task = current; -+ -+again: -+ /* -+ * If a task pins this CPU after we pass this check, take_cpu_down -+ * will return -EAGAIN. -+ */ -+ for (;;) { -+ int nr_pinned; -+ -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ nr_pinned = cpu_nr_pinned(cpu); -+ if (nr_pinned == 0) -+ break; -+ schedule(); -+ } -+ set_current_state(TASK_RUNNING); - #endif - - /* - * So now all preempt/rcu users must observe !cpu_active(). 
- */ - err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ if (err == -EAGAIN) -+ goto again; -+#endif - if (err) { --#ifdef CONFIG_PREEMPT_RT_FULL -- __write_rt_unlock(cpuhp_pin); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ takedown_cpu_task = NULL; - #endif - /* CPU refused to die */ - irq_unlock_sparse(); -@@ -939,8 +912,8 @@ static int takedown_cpu(unsigned int cpu) - wait_for_ap_thread(st, false); - BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); - --#ifdef CONFIG_PREEMPT_RT_FULL -- __write_rt_unlock(cpuhp_pin); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ takedown_cpu_task = NULL; - #endif - /* Interrupts are moved away from the dying cpu, reenable alloc/free */ - irq_unlock_sparse(); -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 6fd3f7b4d7d8..e97ac751aad2 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1065,7 +1065,8 @@ static int migration_cpu_stop(void *data) - void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) - { - cpumask_copy(&p->cpus_mask, new_mask); -- p->nr_cpus_allowed = cpumask_weight(new_mask); -+ if (p->cpus_ptr == &p->cpus_mask) -+ p->nr_cpus_allowed = cpumask_weight(new_mask); - } - - #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) -@@ -1076,8 +1077,7 @@ int __migrate_disabled(struct task_struct *p) - EXPORT_SYMBOL_GPL(__migrate_disabled); - #endif - --static void __do_set_cpus_allowed_tail(struct task_struct *p, -- const struct cpumask *new_mask) -+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) - { - struct rq *rq = task_rq(p); - bool queued, running; -@@ -1106,20 +1106,6 @@ static void __do_set_cpus_allowed_tail(struct task_struct *p, - set_curr_task(rq, p); - } - --void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) --{ --#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) -- if (__migrate_disabled(p)) { -- lockdep_assert_held(&p->pi_lock); -- -- cpumask_copy(&p->cpus_mask, new_mask); -- p->migrate_disable_update = 1; -- return; -- } --#endif -- __do_set_cpus_allowed_tail(p, new_mask); --} -- - /* - * Change a given task's CPU affinity. Migrate the thread to a - * proper CPU and schedule it away if the CPU it's executing on -@@ -1179,7 +1165,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, - } - - /* Can the task run on the task's current CPU? If so, we're done */ -- if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) -+ if (cpumask_test_cpu(task_cpu(p), new_mask) || -+ p->cpus_ptr != &p->cpus_mask) - goto out; - - if (task_running(rq, p) || p->state == TASK_WAKING) { -@@ -3454,6 +3441,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) - BUG(); - } - -+static void migrate_disabled_sched(struct task_struct *p); -+ - /* - * __schedule() is the main scheduler function. 
- * -@@ -3524,6 +3513,9 @@ static void __sched notrace __schedule(bool preempt) - rq_lock(rq, &rf); - smp_mb__after_spinlock(); - -+ if (__migrate_disabled(prev)) -+ migrate_disabled_sched(prev); -+ - /* Promote REQ to ACT */ - rq->clock_update_flags <<= 1; - update_rq_clock(rq); -@@ -5779,6 +5771,8 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) - BUG_ON(!next); - put_prev_task(rq, next); - -+ WARN_ON_ONCE(__migrate_disabled(next)); -+ - /* - * Rules for changing task_struct::cpus_mask are holding - * both pi_lock and rq->lock, such that holding either -@@ -7247,14 +7241,9 @@ update_nr_migratory(struct task_struct *p, long delta) - static inline void - migrate_disable_update_cpus_allowed(struct task_struct *p) - { -- struct rq *rq; -- struct rq_flags rf; -- -- rq = task_rq_lock(p, &rf); - p->cpus_ptr = cpumask_of(smp_processor_id()); - update_nr_migratory(p, -1); - p->nr_cpus_allowed = 1; -- task_rq_unlock(rq, p, &rf); - } - - static inline void -@@ -7272,54 +7261,35 @@ migrate_enable_update_cpus_allowed(struct task_struct *p) - - void migrate_disable(void) - { -- struct task_struct *p = current; -+ preempt_disable(); - -- if (in_atomic() || irqs_disabled()) { -+ if (++current->migrate_disable == 1) { -+ this_rq()->nr_pinned++; -+ preempt_lazy_disable(); - #ifdef CONFIG_SCHED_DEBUG -- p->migrate_disable_atomic++; -+ WARN_ON_ONCE(current->pinned_on_cpu >= 0); -+ current->pinned_on_cpu = smp_processor_id(); - #endif -- return; -- } --#ifdef CONFIG_SCHED_DEBUG -- if (unlikely(p->migrate_disable_atomic)) { -- tracing_off(); -- WARN_ON_ONCE(1); - } --#endif - -- if (p->migrate_disable) { -- p->migrate_disable++; -- return; -- } -+ preempt_enable(); -+} -+EXPORT_SYMBOL(migrate_disable); - -- preempt_disable(); -- preempt_lazy_disable(); -- pin_current_cpu(); -+static void migrate_disabled_sched(struct task_struct *p) -+{ -+ if (p->migrate_disable_scheduled) -+ return; - - migrate_disable_update_cpus_allowed(p); -- p->migrate_disable = 1; -- -- preempt_enable(); -+ p->migrate_disable_scheduled = 1; - } --EXPORT_SYMBOL(migrate_disable); - - void migrate_enable(void) - { - struct task_struct *p = current; -- -- if (in_atomic() || irqs_disabled()) { --#ifdef CONFIG_SCHED_DEBUG -- p->migrate_disable_atomic--; --#endif -- return; -- } -- --#ifdef CONFIG_SCHED_DEBUG -- if (unlikely(p->migrate_disable_atomic)) { -- tracing_off(); -- WARN_ON_ONCE(1); -- } --#endif -+ struct rq *rq = this_rq(); -+ int cpu = task_cpu(p); - - WARN_ON_ONCE(p->migrate_disable <= 0); - if (p->migrate_disable > 1) { -@@ -7329,67 +7299,69 @@ void migrate_enable(void) - - preempt_disable(); - -+#ifdef CONFIG_SCHED_DEBUG -+ WARN_ON_ONCE(current->pinned_on_cpu != cpu); -+ current->pinned_on_cpu = -1; -+#endif -+ -+ WARN_ON_ONCE(rq->nr_pinned < 1); -+ - p->migrate_disable = 0; -+ rq->nr_pinned--; -+ if (rq->nr_pinned == 0 && unlikely(!cpu_active(cpu)) && -+ takedown_cpu_task) -+ wake_up_process(takedown_cpu_task); -+ -+ if (!p->migrate_disable_scheduled) -+ goto out; -+ -+ p->migrate_disable_scheduled = 0; -+ - migrate_enable_update_cpus_allowed(p); - -- if (p->migrate_disable_update) { -- struct rq *rq; -+ WARN_ON(smp_processor_id() != cpu); -+ if (!is_cpu_allowed(p, cpu)) { -+ struct migration_arg arg = { p }; - struct rq_flags rf; -- int cpu = task_cpu(p); - - rq = task_rq_lock(p, &rf); - update_rq_clock(rq); -- -- __do_set_cpus_allowed_tail(p, &p->cpus_mask); -+ arg.dest_cpu = select_fallback_rq(cpu, p); - task_rq_unlock(rq, p, &rf); - -- p->migrate_disable_update = 0; -- -- WARN_ON(smp_processor_id() != 
cpu); -- if (!cpumask_test_cpu(cpu, &p->cpus_mask)) { -- struct migration_arg arg = { p }; -- struct rq_flags rf; -+ preempt_lazy_enable(); -+ preempt_enable(); - -- rq = task_rq_lock(p, &rf); -- update_rq_clock(rq); -- arg.dest_cpu = select_fallback_rq(cpu, p); -- task_rq_unlock(rq, p, &rf); -- -- unpin_current_cpu(); -- preempt_lazy_enable(); -- preempt_enable(); -- -- sleeping_lock_inc(); -- stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); -- sleeping_lock_dec(); -- tlb_migrate_finish(p->mm); -+ sleeping_lock_inc(); -+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); -+ sleeping_lock_dec(); -+ tlb_migrate_finish(p->mm); - -- return; -- } -+ return; - } -- unpin_current_cpu(); -+ -+out: - preempt_lazy_enable(); - preempt_enable(); - } - EXPORT_SYMBOL(migrate_enable); - --#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) --void migrate_disable(void) -+int cpu_nr_pinned(int cpu) - { --#ifdef CONFIG_SCHED_DEBUG -- struct task_struct *p = current; -+ struct rq *rq = cpu_rq(cpu); - -- if (in_atomic() || irqs_disabled()) { -- p->migrate_disable_atomic++; -- return; -- } -+ return rq->nr_pinned; -+} - -- if (unlikely(p->migrate_disable_atomic)) { -- tracing_off(); -- WARN_ON_ONCE(1); -- } -+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) -+static void migrate_disabled_sched(struct task_struct *p) -+{ -+} - -- p->migrate_disable++; -+void migrate_disable(void) -+{ -+#ifdef CONFIG_SCHED_DEBUG -+ current->migrate_disable++; - #endif - barrier(); - } -@@ -7400,20 +7372,14 @@ void migrate_enable(void) - #ifdef CONFIG_SCHED_DEBUG - struct task_struct *p = current; - -- if (in_atomic() || irqs_disabled()) { -- p->migrate_disable_atomic--; -- return; -- } -- -- if (unlikely(p->migrate_disable_atomic)) { -- tracing_off(); -- WARN_ON_ONCE(1); -- } -- - WARN_ON_ONCE(p->migrate_disable <= 0); - p->migrate_disable--; - #endif - barrier(); - } - EXPORT_SYMBOL(migrate_enable); -+#else -+static void migrate_disabled_sched(struct task_struct *p) -+{ -+} - #endif -diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index c90574112bca..78fa5911dd55 100644 ---- a/kernel/sched/sched.h -+++ b/kernel/sched/sched.h -@@ -913,6 +913,10 @@ struct rq { - /* Must be inspected within a rcu lock section */ - struct cpuidle_state *idle_state; - #endif -+ -+#if defined(CONFIG_PREEMPT_RT_BASE) && defined(CONFIG_SMP) -+ int nr_pinned; -+#endif - }; - - static inline int cpu_of(struct rq *rq) -diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c -index b8a8a8db2d75..0c80992aa337 100644 ---- a/lib/smp_processor_id.c -+++ b/lib/smp_processor_id.c -@@ -22,6 +22,9 @@ notrace static unsigned int check_preemption_disabled(const char *what1, - * Kernel threads bound to a single CPU can safely use - * smp_processor_id(): - */ -+ if (current->migrate_disable) -+ goto out; -+ - if (current->nr_cpus_allowed == 1) - goto out; - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0313-sched-migrate_enable-Use-stop_one_cpu_nowait.patch b/kernel/patches-4.19.x-rt/0313-sched-migrate_enable-Use-stop_one_cpu_nowait.patch deleted file mode 100644 index ce7fa3f32..000000000 --- a/kernel/patches-4.19.x-rt/0313-sched-migrate_enable-Use-stop_one_cpu_nowait.patch +++ /dev/null @@ -1,120 +0,0 @@ -From 1e6b41517e56302ffcac82be49722031f005e97c Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Sat, 12 Oct 2019 01:52:14 -0500 -Subject: [PATCH 313/328] sched: migrate_enable: Use stop_one_cpu_nowait() - -[ Upstream commit 6b39a1fa8c53cae08dc03afdae193b7d3a78a173 ] - -migrate_enable() can be called with 
current->state != TASK_RUNNING. -Avoid clobbering the existing state by using stop_one_cpu_nowait(). -Since we're stopping the current cpu, we know that we won't get -past __schedule() until migration_cpu_stop() has run (at least up to -the point of migrating us to another cpu). - -Signed-off-by: Scott Wood -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - include/linux/stop_machine.h | 2 ++ - kernel/sched/core.c | 23 +++++++++++++---------- - kernel/stop_machine.c | 7 +++++-- - 3 files changed, 20 insertions(+), 12 deletions(-) - -diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h -index 6d3635c86dbe..82fc686ddd9e 100644 ---- a/include/linux/stop_machine.h -+++ b/include/linux/stop_machine.h -@@ -26,6 +26,8 @@ struct cpu_stop_work { - cpu_stop_fn_t fn; - void *arg; - struct cpu_stop_done *done; -+ /* Did not run due to disabled stopper; for nowait debug checks */ -+ bool disabled; - }; - - int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index e97ac751aad2..e465381b464d 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -990,6 +990,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, - struct migration_arg { - struct task_struct *task; - int dest_cpu; -+ bool done; - }; - - /* -@@ -1025,6 +1026,11 @@ static int migration_cpu_stop(void *data) - struct task_struct *p = arg->task; - struct rq *rq = this_rq(); - struct rq_flags rf; -+ int dest_cpu = arg->dest_cpu; -+ -+ /* We don't look at arg after this point. */ -+ smp_mb(); -+ arg->done = true; - - /* - * The original target CPU might have gone down and we might -@@ -1047,9 +1053,9 @@ static int migration_cpu_stop(void *data) - */ - if (task_rq(p) == rq) { - if (task_on_rq_queued(p)) -- rq = __migrate_task(rq, &rf, p, arg->dest_cpu); -+ rq = __migrate_task(rq, &rf, p, dest_cpu); - else -- p->wake_cpu = arg->dest_cpu; -+ p->wake_cpu = dest_cpu; - } - rq_unlock(rq, &rf); - raw_spin_unlock(&p->pi_lock); -@@ -7322,6 +7328,7 @@ void migrate_enable(void) - WARN_ON(smp_processor_id() != cpu); - if (!is_cpu_allowed(p, cpu)) { - struct migration_arg arg = { p }; -+ struct cpu_stop_work work; - struct rq_flags rf; - - rq = task_rq_lock(p, &rf); -@@ -7329,15 +7336,11 @@ void migrate_enable(void) - arg.dest_cpu = select_fallback_rq(cpu, p); - task_rq_unlock(rq, p, &rf); - -- preempt_lazy_enable(); -- preempt_enable(); -- -- sleeping_lock_inc(); -- stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); -- sleeping_lock_dec(); -+ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, -+ &arg, &work); - tlb_migrate_finish(p->mm); -- -- return; -+ __schedule(true); -+ WARN_ON_ONCE(!arg.done && !work.disabled); - } - - out: -diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c -index 067cb83f37ea..2d15c0d50625 100644 ---- a/kernel/stop_machine.c -+++ b/kernel/stop_machine.c -@@ -86,8 +86,11 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) - enabled = stopper->enabled; - if (enabled) - __cpu_stop_queue_work(stopper, work, &wakeq); -- else if (work->done) -- cpu_stop_signal_done(work->done); -+ else { -+ work->disabled = true; -+ if (work->done) -+ cpu_stop_signal_done(work->done); -+ } - raw_spin_unlock_irqrestore(&stopper->lock, flags); - - wake_up_q(&wakeq); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0314-Revert-ARM-Initialize-split-page-table-locks-for-vec.patch 
b/kernel/patches-4.19.x-rt/0314-Revert-ARM-Initialize-split-page-table-locks-for-vec.patch deleted file mode 100644 index 30110b056..000000000 --- a/kernel/patches-4.19.x-rt/0314-Revert-ARM-Initialize-split-page-table-locks-for-vec.patch +++ /dev/null @@ -1,86 +0,0 @@ -From 782db84c043ebd020312649d976fe1c74178ea34 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 18 Oct 2019 13:04:15 +0200 -Subject: [PATCH 314/328] Revert "ARM: Initialize split page table locks for - vector page" - -[ Upstream commit 247074c44d8c3e619dfde6404a52295d8d671d38 ] - -I'm dropping this patch, with its original description: - -|ARM: Initialize split page table locks for vector page -| -|Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if -|PREEMPT_RT_FULL=y because vectors_user_mapping() creates a -|VM_ALWAYSDUMP mapping of the vector page (address 0xffff0000), but no -|ptl->lock has been allocated for the page. An attempt to coredump -|that page will result in a kernel NULL pointer dereference when -|follow_page() attempts to lock the page. -| -|The call tree to the NULL pointer dereference is: -| -| do_notify_resume() -| get_signal_to_deliver() -| do_coredump() -| elf_core_dump() -| get_dump_page() -| __get_user_pages() -| follow_page() -| pte_offset_map_lock() <----- a #define -| ... -| rt_spin_lock() -| -|The underlying problem is exposed by mm-shrink-the-page-frame-to-rt-size.patch. - -The patch named mm-shrink-the-page-frame-to-rt-size.patch was dropped -from the RT queue once the SPLIT_PTLOCK_CPUS feature (in a slightly -different shape) went upstream (somewhere between v3.12 and v3.14). - -I can see that the patch still allocates a lock which wasn't there -before. However I can't trigger a kernel oops like described in the -patch by triggering a coredump. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - arch/arm/kernel/process.c | 24 ------------------------ - 1 file changed, 24 deletions(-) - -diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c -index 8d3c7ce34c24..82ab015bf42b 100644 ---- a/arch/arm/kernel/process.c -+++ b/arch/arm/kernel/process.c -@@ -324,30 +324,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) - } - - #ifdef CONFIG_MMU --/* -- * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not -- * initialized by pgtable_page_ctor() then a coredump of the vector page will -- * fail. 
-- */ --static int __init vectors_user_mapping_init_page(void) --{ -- struct page *page; -- unsigned long addr = 0xffff0000; -- pgd_t *pgd; -- pud_t *pud; -- pmd_t *pmd; -- -- pgd = pgd_offset_k(addr); -- pud = pud_offset(pgd, addr); -- pmd = pmd_offset(pud, addr); -- page = pmd_page(*(pmd)); -- -- pgtable_page_ctor(page); -- -- return 0; --} --late_initcall(vectors_user_mapping_init_page); -- - #ifdef CONFIG_KUSER_HELPERS - /* - * The vectors page is always readable from user space for the --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0315-locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch b/kernel/patches-4.19.x-rt/0315-locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch deleted file mode 100644 index d4a84cf86..000000000 --- a/kernel/patches-4.19.x-rt/0315-locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch +++ /dev/null @@ -1,128 +0,0 @@ -From 14da0cff2bdb4c5c183a38c223f4d0b03532852a Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 19 Nov 2019 09:25:04 +0100 -Subject: [PATCH 315/328] locking: Make spinlock_t and rwlock_t a RCU section - on RT - -[ Upstream commit 84440022a0e1c8c936d61f8f97593674a295d409 ] - -On !RT a locked spinlock_t and rwlock_t disables preemption which -implies a RCU read section. There is code that relies on that behaviour. - -Add an explicit RCU read section on RT while a sleeping lock (a lock -which would disables preemption on !RT) acquired. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/locking/rtmutex.c | 6 ++++++ - kernel/locking/rwlock-rt.c | 6 ++++++ - 2 files changed, 12 insertions(+) - -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c -index 63b3d6f306fa..c7d3ae01b4e5 100644 ---- a/kernel/locking/rtmutex.c -+++ b/kernel/locking/rtmutex.c -@@ -1142,6 +1142,7 @@ void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) - void __lockfunc rt_spin_lock(spinlock_t *lock) - { - sleeping_lock_inc(); -+ rcu_read_lock(); - migrate_disable(); - spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); - rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); -@@ -1157,6 +1158,7 @@ void __lockfunc __rt_spin_lock(struct rt_mutex *lock) - void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) - { - sleeping_lock_inc(); -+ rcu_read_lock(); - migrate_disable(); - spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); -@@ -1170,6 +1172,7 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock) - spin_release(&lock->dep_map, 1, _RET_IP_); - rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); - migrate_enable(); -+ rcu_read_unlock(); - sleeping_lock_dec(); - } - EXPORT_SYMBOL(rt_spin_unlock); -@@ -1201,6 +1204,7 @@ int __lockfunc rt_spin_trylock(spinlock_t *lock) - ret = __rt_mutex_trylock(&lock->lock); - if (ret) { - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ rcu_read_lock(); - } else { - migrate_enable(); - sleeping_lock_dec(); -@@ -1217,6 +1221,7 @@ int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) - ret = __rt_mutex_trylock(&lock->lock); - if (ret) { - sleeping_lock_inc(); -+ rcu_read_lock(); - migrate_disable(); - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - } else -@@ -1233,6 +1238,7 @@ int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) - ret = __rt_mutex_trylock(&lock->lock); - if (ret) { - sleeping_lock_inc(); -+ rcu_read_lock(); - migrate_disable(); - spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); - } -diff --git a/kernel/locking/rwlock-rt.c 
b/kernel/locking/rwlock-rt.c -index c3b91205161c..0ae8c62ea832 100644 ---- a/kernel/locking/rwlock-rt.c -+++ b/kernel/locking/rwlock-rt.c -@@ -310,6 +310,7 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock) - ret = do_read_rt_trylock(rwlock); - if (ret) { - rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); -+ rcu_read_lock(); - } else { - migrate_enable(); - sleeping_lock_dec(); -@@ -327,6 +328,7 @@ int __lockfunc rt_write_trylock(rwlock_t *rwlock) - ret = do_write_rt_trylock(rwlock); - if (ret) { - rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); -+ rcu_read_lock(); - } else { - migrate_enable(); - sleeping_lock_dec(); -@@ -338,6 +340,7 @@ EXPORT_SYMBOL(rt_write_trylock); - void __lockfunc rt_read_lock(rwlock_t *rwlock) - { - sleeping_lock_inc(); -+ rcu_read_lock(); - migrate_disable(); - rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); - do_read_rt_lock(rwlock); -@@ -347,6 +350,7 @@ EXPORT_SYMBOL(rt_read_lock); - void __lockfunc rt_write_lock(rwlock_t *rwlock) - { - sleeping_lock_inc(); -+ rcu_read_lock(); - migrate_disable(); - rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); - do_write_rt_lock(rwlock); -@@ -358,6 +362,7 @@ void __lockfunc rt_read_unlock(rwlock_t *rwlock) - rwlock_release(&rwlock->dep_map, 1, _RET_IP_); - do_read_rt_unlock(rwlock); - migrate_enable(); -+ rcu_read_unlock(); - sleeping_lock_dec(); - } - EXPORT_SYMBOL(rt_read_unlock); -@@ -367,6 +372,7 @@ void __lockfunc rt_write_unlock(rwlock_t *rwlock) - rwlock_release(&rwlock->dep_map, 1, _RET_IP_); - do_write_rt_unlock(rwlock); - migrate_enable(); -+ rcu_read_unlock(); - sleeping_lock_dec(); - } - EXPORT_SYMBOL(rt_write_unlock); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0316-sched-core-migrate_enable-must-access-takedown_cpu_t.patch b/kernel/patches-4.19.x-rt/0316-sched-core-migrate_enable-must-access-takedown_cpu_t.patch deleted file mode 100644 index fa6151b8b..000000000 --- a/kernel/patches-4.19.x-rt/0316-sched-core-migrate_enable-must-access-takedown_cpu_t.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 459ffc40b777727525726563ee2522a82ecfa12c Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 29 Nov 2019 17:24:55 +0100 -Subject: [PATCH 316/328] sched/core: migrate_enable() must access - takedown_cpu_task on !HOTPLUG_CPU - -[ Upstream commit a61d1977f692e46bad99a100f264981ba08cb4bd ] - -The variable takedown_cpu_task is never declared/used on !HOTPLUG_CPU -except for migrate_enable(). This leads to a link error. - -Don't use takedown_cpu_task in !HOTPLUG_CPU. 
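The rule behind the fix is the usual one for conditionally built symbols: every reference has to sit under the same configuration guard as the definition, or configurations without the option fail to link. A reduced stand-alone illustration, where CONFIG_HOTPLUG_CPU is just a -D compile flag and the types are placeholders:

#include <stdio.h>

#ifdef CONFIG_HOTPLUG_CPU
struct task { const char *name; };
struct task *takedown_cpu_task;      /* only exists when hotplug is built */
#endif

static void migrate_enable_wakeup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
    /* Safe: the reference is compiled out together with the definition. */
    if (takedown_cpu_task)
        printf("wake %s\n", takedown_cpu_task->name);
#endif
}

int main(void)
{
#ifdef CONFIG_HOTPLUG_CPU
    static struct task waiter = { "cpu-down waiter" };
    takedown_cpu_task = &waiter;
#endif
    migrate_enable_wakeup();
    return 0;
}

Compiled plain it builds and does nothing; compiled with -DCONFIG_HOTPLUG_CPU it builds and performs the wakeup, mirroring how the patch keeps migrate_enable() linkable on !HOTPLUG_CPU kernels.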
- -Reported-by: Dick Hollenbeck -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/cpu.c | 2 ++ - kernel/sched/core.c | 2 ++ - 2 files changed, 4 insertions(+) - -diff --git a/kernel/cpu.c b/kernel/cpu.c -index 2834eed32b55..d28254597f59 100644 ---- a/kernel/cpu.c -+++ b/kernel/cpu.c -@@ -845,7 +845,9 @@ static int take_cpu_down(void *_param) - return 0; - } - -+#ifdef CONFIG_PREEMPT_RT_BASE - struct task_struct *takedown_cpu_task; -+#endif - - static int takedown_cpu(unsigned int cpu) - { -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index e465381b464d..cbd76324babd 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7314,9 +7314,11 @@ void migrate_enable(void) - - p->migrate_disable = 0; - rq->nr_pinned--; -+#ifdef CONFIG_HOTPLUG_CPU - if (rq->nr_pinned == 0 && unlikely(!cpu_active(cpu)) && - takedown_cpu_task) - wake_up_process(takedown_cpu_task); -+#endif - - if (!p->migrate_disable_scheduled) - goto out; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0317-lib-smp_processor_id-Adjust-check_preemption_disable.patch b/kernel/patches-4.19.x-rt/0317-lib-smp_processor_id-Adjust-check_preemption_disable.patch deleted file mode 100644 index 10526b18c..000000000 --- a/kernel/patches-4.19.x-rt/0317-lib-smp_processor_id-Adjust-check_preemption_disable.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 9280cd27ed3c962fd0a1c6b2337bdbdd097a7466 Mon Sep 17 00:00:00 2001 -From: Daniel Wagner -Date: Mon, 16 Dec 2019 16:15:57 +0100 -Subject: [PATCH 317/328] lib/smp_processor_id: Adjust - check_preemption_disabled() - -[ Upstream commit af3c1c5fdf177870fb5e6e16b24e374696ab28f5 ] - -The current->migrate_disable counter is not always defined leading to -build failures with DEBUG_PREEMPT && !PREEMPT_RT_BASE. - -Restrict the access to ->migrate_disable to same set where -->migrate_disable is modified. - -Signed-off-by: Daniel Wagner -Signed-off-by: Steven Rostedt (VMware) -[bigeasy: adjust condition + description] -Signed-off-by: Sebastian Andrzej Siewior ---- - lib/smp_processor_id.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c -index 0c80992aa337..2e7398534b66 100644 ---- a/lib/smp_processor_id.c -+++ b/lib/smp_processor_id.c -@@ -22,8 +22,10 @@ notrace static unsigned int check_preemption_disabled(const char *what1, - * Kernel threads bound to a single CPU can safely use - * smp_processor_id(): - */ -+#if defined(CONFIG_PREEMPT_RT_BASE) && (defined(CONFIG_SMP) || defined(CONFIG_SCHED_DEBUG)) - if (current->migrate_disable) - goto out; -+#endif - - if (current->nr_cpus_allowed == 1) - goto out; --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0318-sched-migrate_enable-Busy-loop-until-the-migration-r.patch b/kernel/patches-4.19.x-rt/0318-sched-migrate_enable-Busy-loop-until-the-migration-r.patch deleted file mode 100644 index a6e33b48a..000000000 --- a/kernel/patches-4.19.x-rt/0318-sched-migrate_enable-Busy-loop-until-the-migration-r.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 7c90df1e8e20e69467cf37d9fafb5a6993f7bf65 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Thu, 12 Dec 2019 10:53:59 +0100 -Subject: [PATCH 318/328] sched: migrate_enable: Busy loop until the migration - request is completed - -[ Upstream commit 140d7f54a5fff02898d2ca9802b39548bf7455f1 ] - -If user task changes the CPU affinity mask of a running task it will -dispatch migration request if the current CPU is no longer allowed. 
This -might happen shortly before a task enters a migrate_disable() section. -Upon leaving the migrate_disable() section, the task will notice that -the current CPU is no longer allowed and will will dispatch its own -migration request to move it off the current CPU. -While invoking __schedule() the first migration request will be -processed and the task returns on the "new" CPU with "arg.done = 0". Its -own migration request will be processed shortly after and will result in -memory corruption if the stack memory, designed for request, was used -otherwise in the meantime. - -Spin until the migration request has been processed if it was accepted. - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/sched/core.c | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index cbd76324babd..4616c086dd26 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7329,7 +7329,7 @@ void migrate_enable(void) - - WARN_ON(smp_processor_id() != cpu); - if (!is_cpu_allowed(p, cpu)) { -- struct migration_arg arg = { p }; -+ struct migration_arg arg = { .task = p }; - struct cpu_stop_work work; - struct rq_flags rf; - -@@ -7342,7 +7342,10 @@ void migrate_enable(void) - &arg, &work); - tlb_migrate_finish(p->mm); - __schedule(true); -- WARN_ON_ONCE(!arg.done && !work.disabled); -+ if (!work.disabled) { -+ while (!arg.done) -+ cpu_relax(); -+ } - } - - out: --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0319-userfaultfd-Use-a-seqlock-instead-of-seqcount.patch b/kernel/patches-4.19.x-rt/0319-userfaultfd-Use-a-seqlock-instead-of-seqcount.patch deleted file mode 100644 index c8632febf..000000000 --- a/kernel/patches-4.19.x-rt/0319-userfaultfd-Use-a-seqlock-instead-of-seqcount.patch +++ /dev/null @@ -1,79 +0,0 @@ -From cb67e79f0923a4a0a62dbedf1d5708586aa67d22 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Wed, 18 Dec 2019 12:25:09 +0100 -Subject: [PATCH 319/328] userfaultfd: Use a seqlock instead of seqcount - -[ Upstream commit dc952a564d02997330654be9628bbe97ba2a05d3 ] - -On RT write_seqcount_begin() disables preemption which leads to warning -in add_wait_queue() while the spinlock_t is acquired. -The waitqueue can't be converted to swait_queue because -userfaultfd_wake_function() is used as a custom wake function. - -Use seqlock instead seqcount to avoid the preempt_disable() section -during add_wait_queue(). - -Cc: stable-rt@vger.kernel.org -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - fs/userfaultfd.c | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - -diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c -index d269d1139f7f..ff6be687f68e 100644 ---- a/fs/userfaultfd.c -+++ b/fs/userfaultfd.c -@@ -61,7 +61,7 @@ struct userfaultfd_ctx { - /* waitqueue head for events */ - wait_queue_head_t event_wqh; - /* a refile sequence protected by fault_pending_wqh lock */ -- struct seqcount refile_seq; -+ seqlock_t refile_seq; - /* pseudo fd refcounting */ - atomic_t refcount; - /* userfaultfd syscall flags */ -@@ -1064,7 +1064,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, - * waitqueue could become empty if this is the - * only userfault. 
- */ -- write_seqcount_begin(&ctx->refile_seq); -+ write_seqlock(&ctx->refile_seq); - - /* - * The fault_pending_wqh.lock prevents the uwq -@@ -1090,7 +1090,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, - list_del(&uwq->wq.entry); - add_wait_queue(&ctx->fault_wqh, &uwq->wq); - -- write_seqcount_end(&ctx->refile_seq); -+ write_sequnlock(&ctx->refile_seq); - - /* careful to always initialize msg if ret == 0 */ - *msg = uwq->msg; -@@ -1263,11 +1263,11 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, - * sure we've userfaults to wake. - */ - do { -- seq = read_seqcount_begin(&ctx->refile_seq); -+ seq = read_seqbegin(&ctx->refile_seq); - need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || - waitqueue_active(&ctx->fault_wqh); - cond_resched(); -- } while (read_seqcount_retry(&ctx->refile_seq, seq)); -+ } while (read_seqretry(&ctx->refile_seq, seq)); - if (need_wakeup) - __wake_userfault(ctx, range); - } -@@ -1938,7 +1938,7 @@ static void init_once_userfaultfd_ctx(void *mem) - init_waitqueue_head(&ctx->fault_wqh); - init_waitqueue_head(&ctx->event_wqh); - init_waitqueue_head(&ctx->fd_wqh); -- seqcount_init(&ctx->refile_seq); -+ seqlock_init(&ctx->refile_seq); - } - - SYSCALL_DEFINE1(userfaultfd, int, flags) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0320-sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch b/kernel/patches-4.19.x-rt/0320-sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch deleted file mode 100644 index 2fe467b80..000000000 --- a/kernel/patches-4.19.x-rt/0320-sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch +++ /dev/null @@ -1,82 +0,0 @@ -From ed060a7dea70ea820b7816dac901e12ad9e40c8a Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Fri, 24 Jan 2020 06:11:46 -0500 -Subject: [PATCH 320/328] sched: migrate_enable: Use per-cpu cpu_stop_work - -[ Upstream commit 2dcd94b443c5dcbc20281666321b7f025f9cc85c ] - -Commit e6c287b1512d ("sched: migrate_enable: Use stop_one_cpu_nowait()") -adds a busy wait to deal with an edge case where the migrated thread -can resume running on another CPU before the stopper has consumed -cpu_stop_work. However, this is done with preemption disabled and can -potentially lead to deadlock. - -While it is not guaranteed that the cpu_stop_work will be consumed before -the migrating thread resumes and exits the stack frame, it is guaranteed -that nothing other than the stopper can run on the old cpu between the -migrating thread scheduling out and the cpu_stop_work being consumed. -Thus, we can store cpu_stop_work in per-cpu data without it being -reused too early. 
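The lifetime argument can be restated outside the scheduler: work handed to an asynchronous consumer must not live in a stack frame that may unwind first, and one statically allocated slot per CPU is safe precisely because nothing else can queue on that CPU before the stopper drains it. A stand-alone sketch of the two variants, with placeholder names and a bare pointer standing in for the stopper's queue:

#include <stdio.h>

struct stop_work { int dest_cpu; };        /* stand-in for cpu_stop_work */

static struct stop_work *queued;           /* the "stopper's" inbox      */

static void queue_stop_work(struct stop_work *w) { queued = w; }

/* Broken pattern: the work lives on the stack of a function that returns
 * before the consumer runs, so the queued pointer is left dangling. */
static void queue_from_stack(void)
{
    struct stop_work w = { .dest_cpu = 3 };
    queue_stop_work(&w);
}   /* w is gone here */

/* Pattern used by the patch: one statically allocated work item per CPU. */
static struct stop_work per_cpu_work[4];

static void queue_from_per_cpu(int cpu)
{
    per_cpu_work[cpu].dest_cpu = 3;
    queue_stop_work(&per_cpu_work[cpu]);
}

int main(void)
{
    queue_from_stack();     /* 'queued' now dangles; using it would be UB */
    queue_from_per_cpu(0);  /* repoint it at stable per-CPU storage       */
    printf("consumer sees dest_cpu=%d\n", queued->dest_cpu);
    return 0;
}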
- -Fixes: e6c287b1512d ("sched: migrate_enable: Use stop_one_cpu_nowait()") -Suggested-by: Sebastian Andrzej Siewior -Signed-off-by: Scott Wood -Reviewed-by: Steven Rostedt (VMware) -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/sched/core.c | 22 ++++++++++++++-------- - 1 file changed, 14 insertions(+), 8 deletions(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 4616c086dd26..c4290fa5c0b6 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7291,6 +7291,9 @@ static void migrate_disabled_sched(struct task_struct *p) - p->migrate_disable_scheduled = 1; - } - -+static DEFINE_PER_CPU(struct cpu_stop_work, migrate_work); -+static DEFINE_PER_CPU(struct migration_arg, migrate_arg); -+ - void migrate_enable(void) - { - struct task_struct *p = current; -@@ -7329,23 +7332,26 @@ void migrate_enable(void) - - WARN_ON(smp_processor_id() != cpu); - if (!is_cpu_allowed(p, cpu)) { -- struct migration_arg arg = { .task = p }; -- struct cpu_stop_work work; -+ struct migration_arg __percpu *arg; -+ struct cpu_stop_work __percpu *work; - struct rq_flags rf; - -+ work = this_cpu_ptr(&migrate_work); -+ arg = this_cpu_ptr(&migrate_arg); -+ WARN_ON_ONCE(!arg->done && !work->disabled && work->arg); -+ -+ arg->task = p; -+ arg->done = false; -+ - rq = task_rq_lock(p, &rf); - update_rq_clock(rq); -- arg.dest_cpu = select_fallback_rq(cpu, p); -+ arg->dest_cpu = select_fallback_rq(cpu, p); - task_rq_unlock(rq, p, &rf); - - stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, -- &arg, &work); -+ arg, work); - tlb_migrate_finish(p->mm); - __schedule(true); -- if (!work.disabled) { -- while (!arg.done) -- cpu_relax(); -- } - } - - out: --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0321-sched-migrate_enable-Remove-__schedule-call.patch b/kernel/patches-4.19.x-rt/0321-sched-migrate_enable-Remove-__schedule-call.patch deleted file mode 100644 index bf41716d2..000000000 --- a/kernel/patches-4.19.x-rt/0321-sched-migrate_enable-Remove-__schedule-call.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 5c7f33dce6217b1e9903dbc0b05492b13a1be8c8 Mon Sep 17 00:00:00 2001 -From: Scott Wood -Date: Fri, 24 Jan 2020 06:11:47 -0500 -Subject: [PATCH 321/328] sched: migrate_enable: Remove __schedule() call - -[ Upstream commit b8162e61e9a33bd1de6452eb838fbf50a93ddd9a ] - -We can rely on preempt_enable() to schedule. 
Besides simplifying the -code, this potentially allows sequences such as the following to be -permitted: - -migrate_disable(); -preempt_disable(); -migrate_enable(); -preempt_enable(); - -Suggested-by: Sebastian Andrzej Siewior -Signed-off-by: Scott Wood -Reviewed-by: Steven Rostedt (VMware) -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/sched/core.c | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index c4290fa5c0b6..02e51c74e0bf 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -7351,7 +7351,6 @@ void migrate_enable(void) - stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, - arg, work); - tlb_migrate_finish(p->mm); -- __schedule(true); - } - - out: --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0322-mm-memcontrol-Move-misplaced-local_unlock_irqrestore.patch b/kernel/patches-4.19.x-rt/0322-mm-memcontrol-Move-misplaced-local_unlock_irqrestore.patch deleted file mode 100644 index e8b93ec05..000000000 --- a/kernel/patches-4.19.x-rt/0322-mm-memcontrol-Move-misplaced-local_unlock_irqrestore.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 31f20b92c6fb36429f50063bfab6b642d54013c8 Mon Sep 17 00:00:00 2001 -From: Matt Fleming -Date: Sun, 26 Jan 2020 21:19:45 +0000 -Subject: [PATCH 322/328] mm/memcontrol: Move misplaced - local_unlock_irqrestore() - -[ Upstream commit 071a1d6a6e14d0dec240a8c67b425140d7f92f6a ] - -The comment about local_lock_irqsave() mentions just the counters and -css_put_many()'s callback just invokes a worker so it is safe to move the -unlock function after memcg_check_events() so css_put_many() can be invoked -without the lock acquired. - -Cc: Daniel Wagner -Signed-off-by: Matt Fleming -Signed-off-by: Steven Rostedt (VMware) -[bigeasy: rewrote the patch description] -Signed-off-by: Sebastian Andrzej Siewior ---- - mm/memcontrol.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index 421ac74450f6..519528959eef 100644 ---- a/mm/memcontrol.c -+++ b/mm/memcontrol.c -@@ -6540,10 +6540,10 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) - mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page), - -nr_entries); - memcg_check_events(memcg, page); -+ local_unlock_irqrestore(event_lock, flags); - - if (!mem_cgroup_is_root(memcg)) - css_put_many(&memcg->css, nr_entries); -- local_unlock_irqrestore(event_lock, flags); - } - - /** --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0323-locallock-Include-header-for-the-current-macro.patch b/kernel/patches-4.19.x-rt/0323-locallock-Include-header-for-the-current-macro.patch deleted file mode 100644 index 13bc0fcc1..000000000 --- a/kernel/patches-4.19.x-rt/0323-locallock-Include-header-for-the-current-macro.patch +++ /dev/null @@ -1,31 +0,0 @@ -From eed4b1765107814294260e3f2a1838a7837efffa Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Tue, 4 Feb 2020 13:29:50 +0100 -Subject: [PATCH 323/328] locallock: Include header for the `current' macro - -[ Upstream commit e693075a5fd852043fa8d2b0467e078d9e5cb782 ] - -Include the header for `current' macro so that -CONFIG_KERNEL_HEADER_TEST=y passes. 
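Behind the one-line fix is the general rule that CONFIG_KERNEL_HEADER_TEST enforces: a header must itself include whatever it uses, here the header that defines `current', instead of relying on its includers to have done so. The same rule in a stand-alone miniature, with offsetof and <stddef.h> standing in for the `current' macro and its header:

#include <stddef.h>   /* include what you use: this defines offsetof */
#include <stdio.h>

/* Imagine the next two lines live in their own header. Because that header
 * uses offsetof, it must pull in <stddef.h> itself to compile stand-alone. */
struct point { int x, y; };
#define POINT_Y_OFFSET offsetof(struct point, y)

int main(void)
{
    printf("offset of y: %zu\n", POINT_Y_OFFSET);
    return 0;
}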
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - include/linux/locallock.h | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/include/linux/locallock.h b/include/linux/locallock.h -index 921eab83cd34..81c89d87723b 100644 ---- a/include/linux/locallock.h -+++ b/include/linux/locallock.h -@@ -3,6 +3,7 @@ - - #include - #include -+#include - - #ifdef CONFIG_PREEMPT_RT_BASE - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0324-drm-vmwgfx-Drop-preempt_disable-in-vmw_fifo_ping_hos.patch b/kernel/patches-4.19.x-rt/0324-drm-vmwgfx-Drop-preempt_disable-in-vmw_fifo_ping_hos.patch deleted file mode 100644 index 32e31ecf3..000000000 --- a/kernel/patches-4.19.x-rt/0324-drm-vmwgfx-Drop-preempt_disable-in-vmw_fifo_ping_hos.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 7a22dfee8ba8e659823a05703d9efb516e7686d2 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 21 Feb 2020 18:57:11 +0100 -Subject: [PATCH 324/328] drm/vmwgfx: Drop preempt_disable() in - vmw_fifo_ping_host() - -[ Upstream commit b901491e7b9b7a676818d84e482b69be72fc142f ] - -vmw_fifo_ping_host() disables preemption around a test and a register -write via vmw_write(). The write function acquires a spinlock_t typed -lock which is not allowed in a preempt_disable()ed section on -PREEMPT_RT. This has been reported in the bugzilla. - -It has been explained by Thomas Hellstrom that this preempt_disable()ed -section is not required for correctness. - -Remove the preempt_disable() section. - -Link: https://bugzilla.kernel.org/show_bug.cgi?id=206591 -Link: https://lkml.kernel.org/r/0b5e1c65d89951de993deab06d1d197b40fd67aa.camel@vmware.com -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2 -- - 1 file changed, 2 deletions(-) - -diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c -index d0fd147ef75f..fb5a3461bb8c 100644 ---- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c -+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c -@@ -167,10 +167,8 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) - { - u32 *fifo_mem = dev_priv->mmio_virt; - -- preempt_disable(); - if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0) - vmw_write(dev_priv, SVGA_REG_SYNC, reason); -- preempt_enable(); - } - - void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0325-tracing-make-preempt_lazy-and-migrate_disable-counte.patch b/kernel/patches-4.19.x-rt/0325-tracing-make-preempt_lazy-and-migrate_disable-counte.patch deleted file mode 100644 index 151684a19..000000000 --- a/kernel/patches-4.19.x-rt/0325-tracing-make-preempt_lazy-and-migrate_disable-counte.patch +++ /dev/null @@ -1,55 +0,0 @@ -From d64eb967cd8f5dfd95b39da264635c257ac56564 Mon Sep 17 00:00:00 2001 -From: Sebastian Andrzej Siewior -Date: Fri, 21 Feb 2020 18:15:15 +0100 -Subject: [PATCH 325/328] tracing: make preempt_lazy and migrate_disable - counter smaller - -[ Upstream commit dd430bf5ecb40f9a89679c85868826475d71de54 ] - -The migrate_disable counter should not exceed 255 so it is enough to -store it in an 8bit field. -With this change we can move the `preempt_lazy_count' member into the -gap so the whole struct shrinks by 4 bytes to 12 bytes in total. -Remove the `padding' field, it is not needed. -Update the tracing fields in trace_define_common_fields() (it was -missing the preempt_lazy_count field). 
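The 4-byte saving quoted above can be checked directly with sizeof over the members visible in the hunk. The struct names below are illustrative and the leading fields of the real trace_entry are omitted, but with the usual 4-byte alignment of int the totals come out at 16 and 12 bytes, matching the changelog:

#include <stdio.h>

/* Old layout: 16-bit counter plus an explicit padding member. */
struct entry_old {
    unsigned char  flags;
    unsigned char  preempt_count;
    int            pid;
    unsigned short migrate_disable;
    unsigned short padding;
    unsigned char  preempt_lazy_count;
};

/* New layout: both counters fit in single bytes, no padding member. */
struct entry_new {
    unsigned char  flags;
    unsigned char  preempt_count;
    int            pid;
    unsigned char  migrate_disable;
    unsigned char  preempt_lazy_count;
};

int main(void)
{
    printf("old: %zu bytes\n", sizeof(struct entry_old));
    printf("new: %zu bytes\n", sizeof(struct entry_new));
    return 0;
}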
- -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Steven Rostedt (VMware) ---- - include/linux/trace_events.h | 3 +-- - kernel/trace/trace_events.c | 4 ++-- - 2 files changed, 3 insertions(+), 4 deletions(-) - -diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h -index 72864a11cec0..e26a85c1b7ba 100644 ---- a/include/linux/trace_events.h -+++ b/include/linux/trace_events.h -@@ -62,8 +62,7 @@ struct trace_entry { - unsigned char flags; - unsigned char preempt_count; - int pid; -- unsigned short migrate_disable; -- unsigned short padding; -+ unsigned char migrate_disable; - unsigned char preempt_lazy_count; - }; - -diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c -index 1febb0ca4c81..07b8f5bfd263 100644 ---- a/kernel/trace/trace_events.c -+++ b/kernel/trace/trace_events.c -@@ -188,8 +188,8 @@ static int trace_define_common_fields(void) - __common_field(unsigned char, flags); - __common_field(unsigned char, preempt_count); - __common_field(int, pid); -- __common_field(unsigned short, migrate_disable); -- __common_field(unsigned short, padding); -+ __common_field(unsigned char, migrate_disable); -+ __common_field(unsigned char, preempt_lazy_count); - - return ret; - } --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0326-lib-ubsan-Remove-flags-parameter-from-calls-to-ubsan.patch b/kernel/patches-4.19.x-rt/0326-lib-ubsan-Remove-flags-parameter-from-calls-to-ubsan.patch deleted file mode 100644 index 9b3ed0703..000000000 --- a/kernel/patches-4.19.x-rt/0326-lib-ubsan-Remove-flags-parameter-from-calls-to-ubsan.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 14d40a0de6df764cb0284a2f94b5c54c75eba9ee Mon Sep 17 00:00:00 2001 -From: Tiejun Chen -Date: Mon, 30 Mar 2020 18:45:23 -0400 -Subject: [PATCH 326/328] lib/ubsan: Remove flags parameter from calls to - ubsan_prologue() and ubsan_epilogue() - -Fails to build with CONFIG_UBSAN=y - -lib/ubsan.c: In function '__ubsan_handle_vla_bound_not_positive': -lib/ubsan.c:348:2: error: too many arguments to function 'ubsan_prologue' - ubsan_prologue(&data->location, &flags); - ^~~~~~~~~~~~~~ -lib/ubsan.c:146:13: note: declared here - static void ubsan_prologue(struct source_location *location) - ^~~~~~~~~~~~~~ -lib/ubsan.c:353:2: error: too many arguments to function 'ubsan_epilogue' - ubsan_epilogue(&flags); - ^~~~~~~~~~~~~~ -lib/ubsan.c:155:13: note: declared here - static void ubsan_epilogue(void) - ^~~~~~~~~~~~~~ - -Signed-off-by: Tiejun Chen -Signed-off-by: Steven Rostedt (VMware) ---- - lib/ubsan.c | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - -diff --git a/lib/ubsan.c b/lib/ubsan.c -index 5830cc9a2164..199c75e03469 100644 ---- a/lib/ubsan.c -+++ b/lib/ubsan.c -@@ -339,18 +339,17 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); - void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, - void *bound) - { -- unsigned long flags; - char bound_str[VALUE_LENGTH]; - - if (suppress_report(&data->location)) - return; - -- ubsan_prologue(&data->location, &flags); -+ ubsan_prologue(&data->location); - - val_to_string(bound_str, sizeof(bound_str), data->type, bound); - pr_err("variable length array bound value %s <= 0\n", bound_str); - -- ubsan_epilogue(&flags); -+ ubsan_epilogue(); - } - EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); - --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0327-irq_work-Fix-checking-of-IRQ_WORK_LAZY-flag-set-on-n.patch b/kernel/patches-4.19.x-rt/0327-irq_work-Fix-checking-of-IRQ_WORK_LAZY-flag-set-on-n.patch deleted file mode 100644 index 
99a639619..000000000 --- a/kernel/patches-4.19.x-rt/0327-irq_work-Fix-checking-of-IRQ_WORK_LAZY-flag-set-on-n.patch +++ /dev/null @@ -1,71 +0,0 @@ -From 4fcf946bf6e220c1b07cafc8412afffa5b7e1f10 Mon Sep 17 00:00:00 2001 -From: "Steven Rostedt (VMware)" -Date: Sun, 22 Mar 2020 00:00:28 +0100 -Subject: [PATCH 327/328] irq_work: Fix checking of IRQ_WORK_LAZY flag set on - non PREEMPT_RT - -When CONFIG_PREEMPT_RT_FULL is not set, some of the checks for using -lazy_list are not properly made as the IRQ_WORK_LAZY is not checked. There's -two locations that need this update, so a use_lazy_list() helper function is -added and used in both locations. - -Link: https://lore.kernel.org/r/20200321230028.GA22058@duo.ucw.cz -Reported-by: Pavel Machek -Signed-off-by: Steven Rostedt (VMware) ---- - kernel/irq_work.c | 15 ++++++++------- - 1 file changed, 8 insertions(+), 7 deletions(-) - -diff --git a/kernel/irq_work.c b/kernel/irq_work.c -index 2940622da5b3..b6d9d35941ac 100644 ---- a/kernel/irq_work.c -+++ b/kernel/irq_work.c -@@ -70,6 +70,12 @@ static void __irq_work_queue_local(struct irq_work *work, struct llist_head *lis - arch_irq_work_raise(); - } - -+static inline bool use_lazy_list(struct irq_work *work) -+{ -+ return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) -+ || (work->flags & IRQ_WORK_LAZY); -+} -+ - /* Enqueue the irq work @work on the current CPU */ - bool irq_work_queue(struct irq_work *work) - { -@@ -81,11 +87,10 @@ bool irq_work_queue(struct irq_work *work) - - /* Queue the entry and raise the IPI if needed. */ - preempt_disable(); -- if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) -+ if (use_lazy_list(work)) - list = this_cpu_ptr(&lazy_list); - else - list = this_cpu_ptr(&raised_list); -- - __irq_work_queue_local(work, list); - preempt_enable(); - -@@ -106,7 +111,6 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) - - #else /* CONFIG_SMP: */ - struct llist_head *list; -- bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); - - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(cpu)); -@@ -116,10 +120,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) - return false; - - preempt_disable(); -- -- lazy_work = work->flags & IRQ_WORK_LAZY; -- -- if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ))) -+ if (use_lazy_list(work)) - list = &per_cpu(lazy_list, cpu); - else - list = &per_cpu(raised_list, cpu); --- -2.25.1 - diff --git a/kernel/patches-4.19.x-rt/0328-Linux-4.19.106-rt46-REBASE.patch b/kernel/patches-4.19.x-rt/0328-Linux-4.19.106-rt46-REBASE.patch deleted file mode 100644 index 66acd5ae1..000000000 --- a/kernel/patches-4.19.x-rt/0328-Linux-4.19.106-rt46-REBASE.patch +++ /dev/null @@ -1,19 +0,0 @@ -From aad78d70e503de59191ef6b3a9a86ef08857e902 Mon Sep 17 00:00:00 2001 -From: "Steven Rostedt (VMware)" -Date: Tue, 31 Mar 2020 12:32:04 -0400 -Subject: [PATCH 328/328] Linux 4.19.106-rt46 REBASE - ---- - localversion-rt | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/localversion-rt b/localversion-rt -index 1199ebade17b..272158183778 100644 ---- a/localversion-rt -+++ b/localversion-rt -@@ -1 +1 @@ ---rt16 -+-rt46 --- -2.25.1 - From 51f833aa51fd6d3e1ddee3cfb7a2e6474566a8f2 Mon Sep 17 00:00:00 2001 From: Rolf Neugebauer Date: Fri, 10 Apr 2020 10:24:50 +0100 Subject: [PATCH 2/8] kernel: Update to 5.4.30/4.19.114/4.14.175 Signed-off-by: Rolf Neugebauer --- kernel/Makefile | 12 ++++++------ kernel/config-4.14.x-x86_64 | 2 +- 
kernel/config-4.19.x-x86_64 | 2 +- kernel/config-5.4.x-aarch64 | 2 +- kernel/config-5.4.x-s390x | 2 +- kernel/config-5.4.x-x86_64 | 2 +- ...ducded-ND_MIN_NAMESPACE_SIZE-from-4MB-to-4K.patch | 2 +- .../0002-hyper-v-trace-vmbus_on_msg_dpc.patch | 2 +- .../0003-hyper-v-trace-vmbus_on_message.patch | 2 +- .../0004-hyper-v-trace-vmbus_onoffer.patch | 2 +- .../0005-hyper-v-trace-vmbus_onoffer_rescind.patch | 2 +- .../0006-hyper-v-trace-vmbus_onopen_result.patch | 2 +- .../0007-hyper-v-trace-vmbus_ongpadl_created.patch | 2 +- .../0008-hyper-v-trace-vmbus_ongpadl_torndown.patch | 2 +- ...0009-hyper-v-trace-vmbus_onversion_response.patch | 2 +- .../0010-hyper-v-trace-vmbus_request_offers.patch | 2 +- .../0011-hyper-v-trace-vmbus_open.patch | 2 +- .../0012-hyper-v-trace-vmbus_close_internal.patch | 2 +- .../0013-hyper-v-trace-vmbus_establish_gpadl.patch | 2 +- .../0014-hyper-v-trace-vmbus_teardown_gpadl.patch | 2 +- .../0015-hyper-v-trace-vmbus_negotiate_version.patch | 2 +- .../0016-hyper-v-trace-vmbus_release_relid.patch | 2 +- ...hyper-v-trace-vmbus_send_tl_connect_request.patch | 2 +- .../0018-hyper-v-trace-channel-events.patch | 2 +- .../0019-serial-forbid-8250-on-s390.patch | 2 +- ...vsc-Allow-only-one-remove-lun-work-item-to-.patch | 2 +- ...vsc-Avoid-excessive-host-scan-on-controller.patch | 2 +- ...ols-Add-fallback-definitions-for-GELF_ST_VI.patch | 2 +- ...api-linux-swab-Fix-potentially-missing-__al.patch | 2 +- ...api-linux-swab-Fix-potentially-missing-__al.patch | 2 +- 30 files changed, 35 insertions(+), 35 deletions(-) diff --git a/kernel/Makefile b/kernel/Makefile index f442c91eb..c18674e76 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -252,18 +252,18 @@ endef # Debug targets only for latest stable and LTS stable # ifeq ($(ARCH),x86_64) -$(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG))) +$(eval $(call kernel,5.4.30,5.4.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,5.4.28,5.4.x,-rt,)) -$(eval $(call kernel,4.19.113,4.19.x,$(EXTRA),$(DEBUG))) -$(eval $(call kernel,4.19.113,4.19.x,,-dbg)) -$(eval $(call kernel,4.14.174,4.14.x,$(EXTRA),$(DEBUG))) +$(eval $(call kernel,4.19.114,4.19.x,$(EXTRA),$(DEBUG))) +$(eval $(call kernel,4.19.114,4.19.x,,-dbg)) +$(eval $(call kernel,4.14.175,4.14.x,$(EXTRA),$(DEBUG))) else ifeq ($(ARCH),aarch64) -$(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG))) +$(eval $(call kernel,5.4.30,5.4.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,5.4.28,5.4.x,-rt,)) else ifeq ($(ARCH),s390x) -$(eval $(call kernel,5.4.28,5.4.x,$(EXTRA),$(DEBUG))) +$(eval $(call kernel,5.4.30,5.4.x,$(EXTRA),$(DEBUG))) endif # Target for kernel config diff --git a/kernel/config-4.14.x-x86_64 b/kernel/config-4.14.x-x86_64 index d00fcf0c6..8a2ada79c 100644 --- a/kernel/config-4.14.x-x86_64 +++ b/kernel/config-4.14.x-x86_64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 4.14.174 Kernel Configuration +# Linux/x86 4.14.175 Kernel Configuration # CONFIG_64BIT=y CONFIG_X86_64=y diff --git a/kernel/config-4.19.x-x86_64 b/kernel/config-4.19.x-x86_64 index 107852ecc..4aa5fcba6 100644 --- a/kernel/config-4.19.x-x86_64 +++ b/kernel/config-4.19.x-x86_64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 4.19.113 Kernel Configuration +# Linux/x86 4.19.114 Kernel Configuration # # diff --git a/kernel/config-5.4.x-aarch64 b/kernel/config-5.4.x-aarch64 index 77f9e873b..0db8d3c6b 100644 --- a/kernel/config-5.4.x-aarch64 +++ b/kernel/config-5.4.x-aarch64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 5.4.28 Kernel Configuration +# Linux/arm64 5.4.30 Kernel Configuration # # diff --git a/kernel/config-5.4.x-s390x b/kernel/config-5.4.x-s390x index c63399334..98ee71637 100644 --- a/kernel/config-5.4.x-s390x +++ b/kernel/config-5.4.x-s390x @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/s390 5.4.28 Kernel Configuration +# Linux/s390 5.4.30 Kernel Configuration # # diff --git a/kernel/config-5.4.x-x86_64 b/kernel/config-5.4.x-x86_64 index f5e1df64c..b466047ad 100644 --- a/kernel/config-5.4.x-x86_64 +++ b/kernel/config-5.4.x-x86_64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 5.4.28 Kernel Configuration +# Linux/x86 5.4.30 Kernel Configuration # # diff --git a/kernel/patches-4.14.x/0001-NVDIMM-reducded-ND_MIN_NAMESPACE_SIZE-from-4MB-to-4K.patch b/kernel/patches-4.14.x/0001-NVDIMM-reducded-ND_MIN_NAMESPACE_SIZE-from-4MB-to-4K.patch index a8458376b..495d2e28d 100644 --- a/kernel/patches-4.14.x/0001-NVDIMM-reducded-ND_MIN_NAMESPACE_SIZE-from-4MB-to-4K.patch +++ b/kernel/patches-4.14.x/0001-NVDIMM-reducded-ND_MIN_NAMESPACE_SIZE-from-4MB-to-4K.patch @@ -1,4 +1,4 @@ -From 56acceb9b7252dc0c9f3ee01625a9dad8461b8b7 Mon Sep 17 00:00:00 2001 +From 24796cb1c1993f1d90742d39eda9cbec7ba9f93f Mon Sep 17 00:00:00 2001 From: Cheng-mean Liu Date: Tue, 11 Jul 2017 16:58:26 -0700 Subject: [PATCH 01/21] NVDIMM: reducded ND_MIN_NAMESPACE_SIZE from 4MB to 4KB diff --git a/kernel/patches-4.14.x/0002-hyper-v-trace-vmbus_on_msg_dpc.patch b/kernel/patches-4.14.x/0002-hyper-v-trace-vmbus_on_msg_dpc.patch index 78930e1ab..36438387e 100644 --- a/kernel/patches-4.14.x/0002-hyper-v-trace-vmbus_on_msg_dpc.patch +++ b/kernel/patches-4.14.x/0002-hyper-v-trace-vmbus_on_msg_dpc.patch @@ -1,4 +1,4 @@ -From 8c2062401403c8a9a203af268d0ec69ddff1103c Mon Sep 17 00:00:00 2001 +From b438267e99a254b7429bbfba070ab6a10d7aa6aa Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:00 -0700 Subject: [PATCH 02/21] hyper-v: trace vmbus_on_msg_dpc() diff --git a/kernel/patches-4.14.x/0003-hyper-v-trace-vmbus_on_message.patch b/kernel/patches-4.14.x/0003-hyper-v-trace-vmbus_on_message.patch index a7423226c..dbfe256c8 100644 --- a/kernel/patches-4.14.x/0003-hyper-v-trace-vmbus_on_message.patch +++ b/kernel/patches-4.14.x/0003-hyper-v-trace-vmbus_on_message.patch @@ -1,4 +1,4 @@ -From c2b71b1d0a9c4c7eefe8da97ffc4baa4615fa6e5 Mon Sep 17 00:00:00 2001 +From 72a91023eeacb8a23aa133ec2eb990fa03b32bc5 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:01 -0700 Subject: [PATCH 03/21] hyper-v: trace vmbus_on_message() diff --git a/kernel/patches-4.14.x/0004-hyper-v-trace-vmbus_onoffer.patch b/kernel/patches-4.14.x/0004-hyper-v-trace-vmbus_onoffer.patch index 001ca1a91..290c187e8 100644 --- a/kernel/patches-4.14.x/0004-hyper-v-trace-vmbus_onoffer.patch +++ b/kernel/patches-4.14.x/0004-hyper-v-trace-vmbus_onoffer.patch @@ -1,4 +1,4 @@ -From 595d68c0b18c7ebfaa3794fd22ef7b50d5e7bf40 Mon Sep 17 00:00:00 2001 +From b4005dadd829aed2039d665cee1881f49a8b63a1 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:02 -0700 Subject: [PATCH 04/21] hyper-v: trace vmbus_onoffer() diff --git a/kernel/patches-4.14.x/0005-hyper-v-trace-vmbus_onoffer_rescind.patch b/kernel/patches-4.14.x/0005-hyper-v-trace-vmbus_onoffer_rescind.patch index ff8aa0f46..30927b4b4 100644 --- a/kernel/patches-4.14.x/0005-hyper-v-trace-vmbus_onoffer_rescind.patch +++ b/kernel/patches-4.14.x/0005-hyper-v-trace-vmbus_onoffer_rescind.patch @@ -1,4 +1,4 @@ -From 
2955351528e97452a9dae475eb64441f2dfefde9 Mon Sep 17 00:00:00 2001 +From 80655cdbb7c8af7ac4e81fe714a58316d0c73d39 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:03 -0700 Subject: [PATCH 05/21] hyper-v: trace vmbus_onoffer_rescind() diff --git a/kernel/patches-4.14.x/0006-hyper-v-trace-vmbus_onopen_result.patch b/kernel/patches-4.14.x/0006-hyper-v-trace-vmbus_onopen_result.patch index 319b7fa27..c9a8e3a82 100644 --- a/kernel/patches-4.14.x/0006-hyper-v-trace-vmbus_onopen_result.patch +++ b/kernel/patches-4.14.x/0006-hyper-v-trace-vmbus_onopen_result.patch @@ -1,4 +1,4 @@ -From 7ec50c0fdbf7dd576cc7fcd3357fc804a2e4a4de Mon Sep 17 00:00:00 2001 +From 0c0455f95486b47ff302b04bfcb9d218b615d3f9 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:04 -0700 Subject: [PATCH 06/21] hyper-v: trace vmbus_onopen_result() diff --git a/kernel/patches-4.14.x/0007-hyper-v-trace-vmbus_ongpadl_created.patch b/kernel/patches-4.14.x/0007-hyper-v-trace-vmbus_ongpadl_created.patch index 4d276a6b0..76a35c11d 100644 --- a/kernel/patches-4.14.x/0007-hyper-v-trace-vmbus_ongpadl_created.patch +++ b/kernel/patches-4.14.x/0007-hyper-v-trace-vmbus_ongpadl_created.patch @@ -1,4 +1,4 @@ -From 588dd389b2928b24daa38e901d9145116c2db0d7 Mon Sep 17 00:00:00 2001 +From 6e88aa6f249cc7f482feb19a00810a9a6022ab25 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:05 -0700 Subject: [PATCH 07/21] hyper-v: trace vmbus_ongpadl_created() diff --git a/kernel/patches-4.14.x/0008-hyper-v-trace-vmbus_ongpadl_torndown.patch b/kernel/patches-4.14.x/0008-hyper-v-trace-vmbus_ongpadl_torndown.patch index 9bcbc3b5e..efe3a43fa 100644 --- a/kernel/patches-4.14.x/0008-hyper-v-trace-vmbus_ongpadl_torndown.patch +++ b/kernel/patches-4.14.x/0008-hyper-v-trace-vmbus_ongpadl_torndown.patch @@ -1,4 +1,4 @@ -From 219a97671a3e1ae1914a62c5e7e269df5c68d82b Mon Sep 17 00:00:00 2001 +From ec55daf877edc19269dd16f0ae0ae926bafeed89 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:06 -0700 Subject: [PATCH 08/21] hyper-v: trace vmbus_ongpadl_torndown() diff --git a/kernel/patches-4.14.x/0009-hyper-v-trace-vmbus_onversion_response.patch b/kernel/patches-4.14.x/0009-hyper-v-trace-vmbus_onversion_response.patch index 881981355..75b78a33c 100644 --- a/kernel/patches-4.14.x/0009-hyper-v-trace-vmbus_onversion_response.patch +++ b/kernel/patches-4.14.x/0009-hyper-v-trace-vmbus_onversion_response.patch @@ -1,4 +1,4 @@ -From 18ffc33bbae039509d07f2cc2054499560532859 Mon Sep 17 00:00:00 2001 +From afa9cce6947186ec0b17b0b9172c9d8a5c2d4b14 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:07 -0700 Subject: [PATCH 09/21] hyper-v: trace vmbus_onversion_response() diff --git a/kernel/patches-4.14.x/0010-hyper-v-trace-vmbus_request_offers.patch b/kernel/patches-4.14.x/0010-hyper-v-trace-vmbus_request_offers.patch index 1a1a0b089..69b7ae525 100644 --- a/kernel/patches-4.14.x/0010-hyper-v-trace-vmbus_request_offers.patch +++ b/kernel/patches-4.14.x/0010-hyper-v-trace-vmbus_request_offers.patch @@ -1,4 +1,4 @@ -From cd66081931b2dc974da2c9de5ea3a84886282185 Mon Sep 17 00:00:00 2001 +From ee0128fbee6aa73c5f832990c86a62f95d3fb5f9 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:08 -0700 Subject: [PATCH 10/21] hyper-v: trace vmbus_request_offers() diff --git a/kernel/patches-4.14.x/0011-hyper-v-trace-vmbus_open.patch b/kernel/patches-4.14.x/0011-hyper-v-trace-vmbus_open.patch index 0f080bedb..132a4ba04 100644 --- 
a/kernel/patches-4.14.x/0011-hyper-v-trace-vmbus_open.patch +++ b/kernel/patches-4.14.x/0011-hyper-v-trace-vmbus_open.patch @@ -1,4 +1,4 @@ -From f0fa37ea2a46cb24bf91ea3492623c16381b73d2 Mon Sep 17 00:00:00 2001 +From 6a6c0474ea0640be0f79390c83b42ce0197e08bf Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:09 -0700 Subject: [PATCH 11/21] hyper-v: trace vmbus_open() diff --git a/kernel/patches-4.14.x/0012-hyper-v-trace-vmbus_close_internal.patch b/kernel/patches-4.14.x/0012-hyper-v-trace-vmbus_close_internal.patch index e9a32a00a..e447098c4 100644 --- a/kernel/patches-4.14.x/0012-hyper-v-trace-vmbus_close_internal.patch +++ b/kernel/patches-4.14.x/0012-hyper-v-trace-vmbus_close_internal.patch @@ -1,4 +1,4 @@ -From 563dc5d22087ed6328c45b59d8b2e498acfd7513 Mon Sep 17 00:00:00 2001 +From 5df7a692eacbe0b42f593bda2b81b1d9e9d154c8 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:10 -0700 Subject: [PATCH 12/21] hyper-v: trace vmbus_close_internal() diff --git a/kernel/patches-4.14.x/0013-hyper-v-trace-vmbus_establish_gpadl.patch b/kernel/patches-4.14.x/0013-hyper-v-trace-vmbus_establish_gpadl.patch index 28192bafc..fd8303336 100644 --- a/kernel/patches-4.14.x/0013-hyper-v-trace-vmbus_establish_gpadl.patch +++ b/kernel/patches-4.14.x/0013-hyper-v-trace-vmbus_establish_gpadl.patch @@ -1,4 +1,4 @@ -From 2090b4ac2819e64573787437960b92afccd4ccf3 Mon Sep 17 00:00:00 2001 +From 733fc90ecf658625cf227ee85a242dc54ee165d8 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:11 -0700 Subject: [PATCH 13/21] hyper-v: trace vmbus_establish_gpadl() diff --git a/kernel/patches-4.14.x/0014-hyper-v-trace-vmbus_teardown_gpadl.patch b/kernel/patches-4.14.x/0014-hyper-v-trace-vmbus_teardown_gpadl.patch index a67ee0a39..a19252c59 100644 --- a/kernel/patches-4.14.x/0014-hyper-v-trace-vmbus_teardown_gpadl.patch +++ b/kernel/patches-4.14.x/0014-hyper-v-trace-vmbus_teardown_gpadl.patch @@ -1,4 +1,4 @@ -From 89aeff0aa68cb9b1dc1adf28043f7f7f32386e7f Mon Sep 17 00:00:00 2001 +From 514148f54891b8e9bdcf65bc06087f2992e2e1c6 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:12 -0700 Subject: [PATCH 14/21] hyper-v: trace vmbus_teardown_gpadl() diff --git a/kernel/patches-4.14.x/0015-hyper-v-trace-vmbus_negotiate_version.patch b/kernel/patches-4.14.x/0015-hyper-v-trace-vmbus_negotiate_version.patch index 9e63f5035..74c4a9f0e 100644 --- a/kernel/patches-4.14.x/0015-hyper-v-trace-vmbus_negotiate_version.patch +++ b/kernel/patches-4.14.x/0015-hyper-v-trace-vmbus_negotiate_version.patch @@ -1,4 +1,4 @@ -From 9671d96306b5b5b6f1b17822cd2623e2d1f596bb Mon Sep 17 00:00:00 2001 +From 7aa5e1d4543decdf2d210df981b9a12d430fed60 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:13 -0700 Subject: [PATCH 15/21] hyper-v: trace vmbus_negotiate_version() diff --git a/kernel/patches-4.14.x/0016-hyper-v-trace-vmbus_release_relid.patch b/kernel/patches-4.14.x/0016-hyper-v-trace-vmbus_release_relid.patch index fea206280..b7608086d 100644 --- a/kernel/patches-4.14.x/0016-hyper-v-trace-vmbus_release_relid.patch +++ b/kernel/patches-4.14.x/0016-hyper-v-trace-vmbus_release_relid.patch @@ -1,4 +1,4 @@ -From 3d2d34fd831e942463030924f423bda8568ce912 Mon Sep 17 00:00:00 2001 +From 68fe2679ce069ae3dee035f398d56a98979203c9 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:14 -0700 Subject: [PATCH 16/21] hyper-v: trace vmbus_release_relid() diff --git 
a/kernel/patches-4.14.x/0017-hyper-v-trace-vmbus_send_tl_connect_request.patch b/kernel/patches-4.14.x/0017-hyper-v-trace-vmbus_send_tl_connect_request.patch index f0cd145e1..7741ddb91 100644 --- a/kernel/patches-4.14.x/0017-hyper-v-trace-vmbus_send_tl_connect_request.patch +++ b/kernel/patches-4.14.x/0017-hyper-v-trace-vmbus_send_tl_connect_request.patch @@ -1,4 +1,4 @@ -From 3680bc215f62a93c46e8801ba8d988160d27db8b Mon Sep 17 00:00:00 2001 +From dda4828610e0a1900af6b7c22c5fb99c73b14305 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:15 -0700 Subject: [PATCH 17/21] hyper-v: trace vmbus_send_tl_connect_request() diff --git a/kernel/patches-4.14.x/0018-hyper-v-trace-channel-events.patch b/kernel/patches-4.14.x/0018-hyper-v-trace-channel-events.patch index d319225d6..6c54fb3c7 100644 --- a/kernel/patches-4.14.x/0018-hyper-v-trace-channel-events.patch +++ b/kernel/patches-4.14.x/0018-hyper-v-trace-channel-events.patch @@ -1,4 +1,4 @@ -From 1a5e65cb9456a2d26e60b0249c470f4c90899108 Mon Sep 17 00:00:00 2001 +From 5d99bb75cf678b865f7d7f4befa78a7565711830 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sun, 29 Oct 2017 12:21:16 -0700 Subject: [PATCH 18/21] hyper-v: trace channel events diff --git a/kernel/patches-4.14.x/0019-serial-forbid-8250-on-s390.patch b/kernel/patches-4.14.x/0019-serial-forbid-8250-on-s390.patch index d33bdb58d..45324f3cf 100644 --- a/kernel/patches-4.14.x/0019-serial-forbid-8250-on-s390.patch +++ b/kernel/patches-4.14.x/0019-serial-forbid-8250-on-s390.patch @@ -1,4 +1,4 @@ -From 0da3e404cb0a4a6876a85f80260bd653dabc7826 Mon Sep 17 00:00:00 2001 +From ac3a5d331cbb4ab7551b63a72797b35a5b7e6b3f Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Tue, 12 Dec 2017 09:08:35 +0100 Subject: [PATCH 19/21] serial: forbid 8250 on s390 diff --git a/kernel/patches-4.14.x/0020-scsi-storvsc-Allow-only-one-remove-lun-work-item-to-.patch b/kernel/patches-4.14.x/0020-scsi-storvsc-Allow-only-one-remove-lun-work-item-to-.patch index 8435096ce..164509cc1 100644 --- a/kernel/patches-4.14.x/0020-scsi-storvsc-Allow-only-one-remove-lun-work-item-to-.patch +++ b/kernel/patches-4.14.x/0020-scsi-storvsc-Allow-only-one-remove-lun-work-item-to-.patch @@ -1,4 +1,4 @@ -From 3d5ab764aa966d80a2f8df397ade599a8980f443 Mon Sep 17 00:00:00 2001 +From 8d79ab8e5806f8b7a8f2dc8fd6857780c8fb79f5 Mon Sep 17 00:00:00 2001 From: Cathy Avery Date: Tue, 31 Oct 2017 08:52:06 -0400 Subject: [PATCH 20/21] scsi: storvsc: Allow only one remove lun work item to diff --git a/kernel/patches-4.14.x/0021-scsi-storvsc-Avoid-excessive-host-scan-on-controller.patch b/kernel/patches-4.14.x/0021-scsi-storvsc-Avoid-excessive-host-scan-on-controller.patch index 159b41898..a80cc562e 100644 --- a/kernel/patches-4.14.x/0021-scsi-storvsc-Avoid-excessive-host-scan-on-controller.patch +++ b/kernel/patches-4.14.x/0021-scsi-storvsc-Avoid-excessive-host-scan-on-controller.patch @@ -1,4 +1,4 @@ -From 284d1e2fdfa1134ba366198e0e1bd671147399f9 Mon Sep 17 00:00:00 2001 +From fe24c8d1c22e600fc8eaa1ea2ae9a8c0091fc0d5 Mon Sep 17 00:00:00 2001 From: Long Li Date: Tue, 31 Oct 2017 14:58:08 -0700 Subject: [PATCH 21/21] scsi: storvsc: Avoid excessive host scan on controller diff --git a/kernel/patches-4.19.x/0001-perf-symbols-Add-fallback-definitions-for-GELF_ST_VI.patch b/kernel/patches-4.19.x/0001-perf-symbols-Add-fallback-definitions-for-GELF_ST_VI.patch index bb6027bc8..01b54e2fa 100644 --- a/kernel/patches-4.19.x/0001-perf-symbols-Add-fallback-definitions-for-GELF_ST_VI.patch +++ 
b/kernel/patches-4.19.x/0001-perf-symbols-Add-fallback-definitions-for-GELF_ST_VI.patch @@ -1,4 +1,4 @@ -From 66f0f687d1a77d74639a694d5f95900970851560 Mon Sep 17 00:00:00 2001 +From 5f86d3c6c5e6e3e3ea93cef6a76a85cffd9a6372 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 4 Feb 2019 15:48:03 -0300 Subject: [PATCH 1/2] perf symbols: Add fallback definitions for diff --git a/kernel/patches-4.19.x/0002-include-uapi-linux-swab-Fix-potentially-missing-__al.patch b/kernel/patches-4.19.x/0002-include-uapi-linux-swab-Fix-potentially-missing-__al.patch index e5309ca74..a553d88b3 100644 --- a/kernel/patches-4.19.x/0002-include-uapi-linux-swab-Fix-potentially-missing-__al.patch +++ b/kernel/patches-4.19.x/0002-include-uapi-linux-swab-Fix-potentially-missing-__al.patch @@ -1,4 +1,4 @@ -From 4156e51c13d1f856eb335454b2d4d4ebcb41dd59 Mon Sep 17 00:00:00 2001 +From 8ca0e7b06f5c0a48f434e50e09ae91372e686235 Mon Sep 17 00:00:00 2001 From: Matt Redfearn Date: Wed, 3 Jan 2018 09:57:30 +0000 Subject: [PATCH 2/2] include/uapi/linux/swab: Fix potentially missing diff --git a/kernel/patches-5.4.x/0001-include-uapi-linux-swab-Fix-potentially-missing-__al.patch b/kernel/patches-5.4.x/0001-include-uapi-linux-swab-Fix-potentially-missing-__al.patch index 9b52976db..6b1d27e70 100644 --- a/kernel/patches-5.4.x/0001-include-uapi-linux-swab-Fix-potentially-missing-__al.patch +++ b/kernel/patches-5.4.x/0001-include-uapi-linux-swab-Fix-potentially-missing-__al.patch @@ -1,4 +1,4 @@ -From bb2c12a651f3f5efe5e2d8327e3cfd68b95d4b85 Mon Sep 17 00:00:00 2001 +From 194853f9b9fe9fcf424410cf5c455390caf91385 Mon Sep 17 00:00:00 2001 From: Matt Redfearn Date: Wed, 3 Jan 2018 09:57:30 +0000 Subject: [PATCH] include/uapi/linux/swab: Fix potentially missing From e208e3050cb39bcc218ccae4ccf96c57f9eeec32 Mon Sep 17 00:00:00 2001 From: Rolf Neugebauer Date: Fri, 10 Apr 2020 10:49:19 +0100 Subject: [PATCH 3/8] kernel: Add v5.6.x kernels The config is based on 5.4.x and has WireGuard enabled manually as a module afterwards.
Signed-off-by: Rolf Neugebauer --- kernel/Makefile | 3 + kernel/config-5.6.x-aarch64 | 5016 +++++++++++++++++ kernel/config-5.6.x-s390x | 3715 ++++++++++++ kernel/config-5.6.x-x86_64 | 4699 +++++++++++++++ ...ux-swab-Fix-potentially-missing-__al.patch | 55 + 5 files changed, 13488 insertions(+) create mode 100644 kernel/config-5.6.x-aarch64 create mode 100644 kernel/config-5.6.x-s390x create mode 100644 kernel/config-5.6.x-x86_64 create mode 100644 kernel/patches-5.6.x/0001-include-uapi-linux-swab-Fix-potentially-missing-__al.patch diff --git a/kernel/Makefile b/kernel/Makefile index c18674e76..65cf59a00 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -252,6 +252,7 @@ endef # Debug targets only for latest stable and LTS stable # ifeq ($(ARCH),x86_64) +$(eval $(call kernel,5.6.2,5.6.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,5.4.30,5.4.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,5.4.28,5.4.x,-rt,)) $(eval $(call kernel,4.19.114,4.19.x,$(EXTRA),$(DEBUG))) @@ -259,10 +260,12 @@ $(eval $(call kernel,4.19.114,4.19.x,,-dbg)) $(eval $(call kernel,4.14.175,4.14.x,$(EXTRA),$(DEBUG))) else ifeq ($(ARCH),aarch64) +$(eval $(call kernel,5.6.2,5.6.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,5.4.30,5.4.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,5.4.28,5.4.x,-rt,)) else ifeq ($(ARCH),s390x) +$(eval $(call kernel,5.6.2,5.6.x,$(EXTRA),$(DEBUG))) $(eval $(call kernel,5.4.30,5.4.x,$(EXTRA),$(DEBUG))) endif diff --git a/kernel/config-5.6.x-aarch64 b/kernel/config-5.6.x-aarch64 new file mode 100644 index 000000000..09738e068 --- /dev/null +++ b/kernel/config-5.6.x-aarch64 @@ -0,0 +1,5016 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 5.6.2 Kernel Configuration +# + +# +# Compiler: gcc (Alpine 8.3.0) 8.3.0 +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=80300 +CONFIG_CLANG_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y +CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-linuxkit" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y 
+CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CC_HAS_INT128=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +# CONFIG_BOOT_CONFIG is not set +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_HAVE_FUTEX_CMPXCHG=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +# CONFIG_USERFAULTFD is not set +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# end of General setup + 
+CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_AGILEX is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +CONFIG_ARCH_BCM2835=y +# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set +# CONFIG_ARCH_BRCMSTB is not set +CONFIG_ARCH_EXYNOS=y +# CONFIG_ARCH_K3 is not set +CONFIG_ARCH_LAYERSCAPE=y +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_MXC is not set +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_ROCKCHIP is not set +# CONFIG_ARCH_S32 is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_STRATIX10 is not set +CONFIG_ARCH_SYNQUACER=y +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +# CONFIG_ARCH_XGENE is not set +# CONFIG_ARCH_ZX is not set +# CONFIG_ARCH_ZYNQMP is not set +# end of Platform selection + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE=y +CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1530923=y +CONFIG_ARM64_ERRATUM_1286807=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_NVHE=y +CONFIG_ARM64_ERRATUM_1319367=y +CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_FUJITSU_ERRATUM_010001=y +# end of ARM errata workarounds via the alternatives framework + +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_SCHED_MC=y +# CONFIG_SCHED_SMT is not set +CONFIG_NR_CPUS=64 +CONFIG_HOTPLUG_CPU=y +# CONFIG_NUMA is not set +CONFIG_HOLES_IN_ZONE=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set 
+# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_SECCOMP=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set +# CONFIG_KEXEC_FILE is not set +# CONFIG_CRASH_DUMP is not set +CONFIG_XEN_DOM0=y +CONFIG_XEN=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +CONFIG_HARDEN_EL2_VECTORS=y +CONFIG_ARM64_SSBD=y +CONFIG_RODATA_FULL_DEFAULT_ENABLED=y +# CONFIG_ARM64_SW_TTBR0_PAN is not set +CONFIG_ARM64_TAGGED_ADDR_ABI=y +# CONFIG_COMPAT is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_USE_LSE_ATOMICS=y +CONFIG_ARM64_VHE=y +# end of ARMv8.1 architectural features + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +# CONFIG_ARM64_PMEM is not set +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_CNP=y +# end of ARMv8.2 architectural features + +# +# ARMv8.3 architectural features +# +CONFIG_ARM64_PTR_AUTH=y +# end of ARMv8.3 architectural features + +# +# ARMv8.5 architectural features +# +CONFIG_ARM64_E0PD=y +CONFIG_ARCH_RANDOM=y +# end of ARMv8.5 architectural features + +CONFIG_ARM64_SVE=y +CONFIG_ARM64_MODULE_PLTS=y +# CONFIG_ARM64_PSEUDO_NMI is not set +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +# end of Kernel Features + +# +# Boot options +# +# CONFIG_ARM64_ACPI_PARKING_PROTOCOL is not set +CONFIG_CMDLINE="" +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +# end of Boot options + +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y + +# +# Power management options +# +# CONFIG_SUSPEND is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +# end of Power management options + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +CONFIG_DT_IDLE_STATES=y + +# +# ARM CPU Idle Drivers +# +CONFIG_ARM_CPUIDLE=y +# CONFIG_ARM_PSCI_CPUIDLE is not set +# end of ARM CPU Idle Drivers +# end of CPU Idle + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +CONFIG_CPUFREQ_DT=y +CONFIG_CPUFREQ_DT_PLATDEV=y +# CONFIG_ACPI_CPPC_CPUFREQ is not set +# CONFIG_ARM_QCOM_CPUFREQ_HW 
is not set +# CONFIG_QORIQ_CPUFREQ is not set +# end of CPU Frequency scaling +# end of CPU Power Management + +# +# Firmware Drivers +# +# CONFIG_ARM_SCMI_PROTOCOL is not set +# CONFIG_ARM_SCPI_PROTOCOL is not set +# CONFIG_ARM_SDE_INTERFACE is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +# CONFIG_ISCSI_IBFT is not set +CONFIG_RASPBERRYPI_FIRMWARE=y +# CONFIG_FW_CFG_SYSFS is not set +CONFIG_HAVE_ARM_SMCCC=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_ARMSTUB=y +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +CONFIG_RESET_ATTACK_MITIGATION=y +# CONFIG_EFI_DISABLE_PCI_DMA is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_EFI_EARLYCON=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_DEBUGGER=y +# CONFIG_ACPI_DEBUGGER_USER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_EC_DEBUGFS=y +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_DEBUG=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=y +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_HAVE_ACPI_APEI=y +# CONFIG_ACPI_APEI is not set +CONFIG_PMIC_OPREGION=y +CONFIG_ACPI_CONFIGFS=y +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_PPTT=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_PMU=y +CONFIG_KVM_INDIRECT_VECTORS=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA256_ARM64=y +CONFIG_CRYPTO_SHA512_ARM64=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +# CONFIG_CRYPTO_SM3_ARM64_CE is not set +# CONFIG_CRYPTO_SM4_ARM64_CE is not set +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_CRYPTO_CHACHA20_NEON=y +CONFIG_CRYPTO_POLY1305_NEON=m +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set +CONFIG_CRYPTO_AES_ARM64_BS=y + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y 
+CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_KEEPINITRD=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_CLONE_BACKWARDS=y +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_RELR=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_PLUGIN_HOSTCC="g++" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_CYC_COMPLEXITY is not set +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_GCC_PLUGIN_RANDSTRUCT=y +CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE=y +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# 
CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ELFCORE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=y +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +# CONFIG_FLATMEM_MANUAL is not set +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +# CONFIG_MEMORY_HOTPLUG is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_MEMORY_FAILURE is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_PTE_DEVMAP=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=y +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_XDP_SOCKETS=y +# CONFIG_XDP_SOCKETS_DIAG is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# 
CONFIG_IP_PNP_RARP is not set +CONFIG_NET_IPIP=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_NET_IP_TUNNEL=y +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=y +CONFIG_NET_FOU=y +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +# CONFIG_INET_ESP_OFFLOAD is not set +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +CONFIG_INET_UDP_DIAG=y +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +# CONFIG_IPV6_OPTIMISTIC_DAD is not set +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +# CONFIG_INET6_ESP_OFFLOAD is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=y +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=y +CONFIG_IPV6_FOU_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +# CONFIG_IPV6_MROUTE is not set +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +# CONFIG_MPTCP is not set +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NETFILTER_NETLINK_OSF=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_LOG_COMMON=y +# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NETFILTER_CONNCOUNT=y +CONFIG_NF_CONNTRACK_MARK=y +# CONFIG_NF_CONNTRACK_SECMARK is not set +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_BROADCAST=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_SNMP=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NF_CT_NETLINK_TIMEOUT=y +CONFIG_NF_CT_NETLINK_HELPER=y +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_SIP=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=y +CONFIG_NF_TABLES=y +# CONFIG_NF_TABLES_SET is not set +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +# CONFIG_NFT_NUMGEN is not set +CONFIG_NFT_CT=y +CONFIG_NFT_COUNTER=y +CONFIG_NFT_CONNLIMIT=y +CONFIG_NFT_LOG=y +CONFIG_NFT_LIMIT=y +CONFIG_NFT_MASQ=y +CONFIG_NFT_REDIR=y +CONFIG_NFT_NAT=y +CONFIG_NFT_TUNNEL=y +# CONFIG_NFT_OBJREF is not set +CONFIG_NFT_QUEUE=y +# CONFIG_NFT_QUOTA is not set +CONFIG_NFT_REJECT=y 
+CONFIG_NFT_REJECT_INET=y +CONFIG_NFT_COMPAT=y +CONFIG_NFT_HASH=y +# CONFIG_NFT_XFRM is not set +# CONFIG_NFT_SOCKET is not set +CONFIG_NFT_OSF=y +CONFIG_NFT_TPROXY=y +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=y +CONFIG_NFT_DUP_NETDEV=y +CONFIG_NFT_FWD_NETDEV=y +# CONFIG_NF_FLOW_TABLE is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y +CONFIG_NETFILTER_XT_SET=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CT=y +CONFIG_NETFILTER_XT_TARGET_DSCP=y +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_HMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_NAT=y +CONFIG_NETFILTER_XT_TARGET_NETMAP=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_RATEEST=y +CONFIG_NETFILTER_XT_TARGET_REDIRECT=y +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_CGROUP=y +CONFIG_NETFILTER_XT_MATCH_CLUSTER=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_CPU=y +CONFIG_NETFILTER_XT_MATCH_DCCP=y +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ECN=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPCOMP=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_IPVS=y +CONFIG_NETFILTER_XT_MATCH_L2TP=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_NFACCT=y +CONFIG_NETFILTER_XT_MATCH_OSF=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_RATEEST=y +CONFIG_NETFILTER_XT_MATCH_REALM=y +CONFIG_NETFILTER_XT_MATCH_RECENT=y +CONFIG_NETFILTER_XT_MATCH_SCTP=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# end of Core Netfilter Configuration + +CONFIG_IP_SET=y +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=y +CONFIG_IP_SET_BITMAP_IPMAC=y +CONFIG_IP_SET_BITMAP_PORT=y +CONFIG_IP_SET_HASH_IP=y +# CONFIG_IP_SET_HASH_IPMARK is not set +CONFIG_IP_SET_HASH_IPPORT=y +CONFIG_IP_SET_HASH_IPPORTIP=y +CONFIG_IP_SET_HASH_IPPORTNET=y +# CONFIG_IP_SET_HASH_IPMAC is not set +# CONFIG_IP_SET_HASH_MAC is not set +# CONFIG_IP_SET_HASH_NETPORTNET is not set 
+CONFIG_IP_SET_HASH_NET=y +# CONFIG_IP_SET_HASH_NETNET is not set +CONFIG_IP_SET_HASH_NETPORT=y +CONFIG_IP_SET_HASH_NETIFACE=y +CONFIG_IP_SET_LIST_SET=y +CONFIG_IP_VS=y +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=y +CONFIG_IP_VS_WRR=y +CONFIG_IP_VS_LC=y +CONFIG_IP_VS_WLC=y +CONFIG_IP_VS_FO=y +CONFIG_IP_VS_OVF=y +CONFIG_IP_VS_LBLC=y +CONFIG_IP_VS_LBLCR=y +CONFIG_IP_VS_DH=y +CONFIG_IP_VS_SH=y +CONFIG_IP_VS_MH=y +CONFIG_IP_VS_SED=y +CONFIG_IP_VS_NQ=y + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=y +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=y + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_NF_TPROXY_IPV4=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=y +CONFIG_NFT_DUP_IPV4=y +# CONFIG_NFT_FIB_IPV4 is not set +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=y +CONFIG_NF_LOG_ARP=y +CONFIG_NF_LOG_IPV4=y +CONFIG_NF_REJECT_IPV4=y +CONFIG_NF_NAT_SNMP_BASIC=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_SYNPROXY=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_TARGET_CLUSTERIP=y +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=y +CONFIG_NF_TPROXY_IPV6=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=y +CONFIG_NFT_DUP_IPV6=y +# CONFIG_NFT_FIB_IPV6 is not set +CONFIG_NF_DUP_IPV6=y +CONFIG_NF_REJECT_IPV6=y +CONFIG_NF_LOG_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_MATCH_RT=y +# CONFIG_IP6_NF_MATCH_SRH is not set +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_TARGET_SYNPROXY=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_IP6_NF_SECURITY=y +CONFIG_IP6_NF_NAT=y +CONFIG_IP6_NF_TARGET_MASQUERADE=y +CONFIG_IP6_NF_TARGET_NPT=y +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_TABLES_BRIDGE=y +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=y +CONFIG_NF_LOG_BRIDGE=y +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE_EBT_T_FILTER=y +CONFIG_BRIDGE_EBT_T_NAT=y +CONFIG_BRIDGE_EBT_802_3=y +CONFIG_BRIDGE_EBT_AMONG=y +CONFIG_BRIDGE_EBT_ARP=y +CONFIG_BRIDGE_EBT_IP=y +CONFIG_BRIDGE_EBT_IP6=y +CONFIG_BRIDGE_EBT_LIMIT=y +CONFIG_BRIDGE_EBT_MARK=y +CONFIG_BRIDGE_EBT_PKTTYPE=y +CONFIG_BRIDGE_EBT_STP=y +CONFIG_BRIDGE_EBT_VLAN=y +CONFIG_BRIDGE_EBT_ARPREPLY=y +CONFIG_BRIDGE_EBT_DNAT=y +CONFIG_BRIDGE_EBT_MARK_T=y +CONFIG_BRIDGE_EBT_REDIRECT=y 
+CONFIG_BRIDGE_EBT_SNAT=y +CONFIG_BRIDGE_EBT_LOG=y +CONFIG_BRIDGE_EBT_NFLOG=y +CONFIG_BPFILTER=y +CONFIG_BPFILTER_UMH=m +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_L2TP=m +# CONFIG_L2TP_DEBUGFS is not set +# CONFIG_L2TP_V3 is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=y +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_VLAN_8021Q_MVRP is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +# CONFIG_NET_SCH_CODEL is not set +# CONFIG_NET_SCH_FQ_CODEL is not set +# CONFIG_NET_SCH_CAKE is not set +# CONFIG_NET_SCH_FQ is not set +# CONFIG_NET_SCH_HHF is not set +# CONFIG_NET_SCH_PIE is not set +CONFIG_NET_SCH_INGRESS=m +# CONFIG_NET_SCH_PLUG is not set +# CONFIG_NET_SCH_ETS is not set +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=y +CONFIG_NET_CLS_TCINDEX=y +CONFIG_NET_CLS_ROUTE4=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=y +CONFIG_NET_CLS_RSVP6=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=y +# CONFIG_NET_CLS_FLOWER is not set +CONFIG_NET_CLS_MATCHALL=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_EMATCH_IPSET=y +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_SAMPLE is not set +CONFIG_NET_ACT_IPT=y +CONFIG_NET_ACT_NAT=y +CONFIG_NET_ACT_PEDIT=y +CONFIG_NET_ACT_SIMP=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_NET_ACT_CSUM=y +# CONFIG_NET_ACT_MPLS is not set +# CONFIG_NET_ACT_VLAN is not set +CONFIG_NET_ACT_BPF=y +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_ACT_CT is not set +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m 
+CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=y +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_XEN is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=y +CONFIG_ETHTOOL_NETLINK=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +# CONFIG_PCIEAER is not set +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_BW is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +CONFIG_PCI_REALLOC_ENABLE_AUTO=y +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_LABEL=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_XILINX is not set +# CONFIG_PCI_XGENE is not set +# CONFIG_PCIE_ALTERA is not set +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y +# CONFIG_PCIE_BRCMSTB is not set + +# +# DesignWare PCI Core Support +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCI_LAYERSCAPE is not set +CONFIG_PCI_HISI=y +CONFIG_PCIE_QCOM=y +# CONFIG_PCIE_KIRIN is not set +CONFIG_PCIE_HISI_STB=y +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_AL is not set +# end of DesignWare PCI Core Support + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# end of Cadence PCIe controllers support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# 
CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +# CONFIG_DEVTMPFS_MOUNT is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_MMIO=y +CONFIG_GENERIC_ARCH_TOPOLOGY=y +# end of Generic Driver Options + +# +# Bus devices +# +CONFIG_ARM_CCI=y +CONFIG_ARM_CCI400_COMMON=y +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_HISILICON_LPC is not set +CONFIG_QCOM_EBI2=y +# CONFIG_SIMPLE_PM_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +# CONFIG_FSL_MC_BUS is not set +# end of Bus devices + +# CONFIG_CONNECTOR is not set +# CONFIG_GNSS is not set +# CONFIG_MTD is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=m +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +CONFIG_CDROM=y +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=y +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=y +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_CDROM_PKTCDVD is not set +CONFIG_ATA_OVER_ETH=m +CONFIG_XEN_BLKDEV_FRONTEND=y +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y +# CONFIG_NVME_MULTIPATH is not set +# CONFIG_NVME_HWMON is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set +# CONFIG_NVME_TARGET is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_SRAM is not set +CONFIG_VEXPRESS_SYSCFG=y +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line 
discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC & related support +# +# CONFIG_VOP_BUS is not set +# end of Intel MIC & related support + +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_HABANA_AI is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_PROC_FS is not set + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=y +# CONFIG_CHR_DEV_SCH is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +# CONFIG_SCSI_FC_ATTRS is not set +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +# CONFIG_SCSI_SAS_ATA is not set +CONFIG_SCSI_SAS_HOST_SMP=y +# CONFIG_SCSI_SRP_ATTRS is not set +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=y +# CONFIG_SCSI_HISI_SAS_PCI is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=y +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_XEN_SCSI_FRONTEND=y +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +CONFIG_SCSI_VIRTIO=y +# CONFIG_SCSI_DH is not set +# end of SCSI device support + +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=y +# CONFIG_ATA_VERBOSE_ERROR is not set +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +# CONFIG_SATA_PMP is not set + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_CEVA is not set +# CONFIG_AHCI_QORIQ is not set +CONFIG_SATA_AHCI_SEATTLE=y +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# 
SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +CONFIG_PATA_SIS=y +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +CONFIG_PATA_PLATFORM=y +CONFIG_PATA_OF_PLATFORM=y +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +CONFIG_PATA_ACPI=y +CONFIG_ATA_GENERIC=y +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=y +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=y +CONFIG_DM_PERSISTENT_DATA=y +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=y +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_THIN_PROVISIONING=y +# CONFIG_DM_CACHE is not set +# CONFIG_DM_WRITECACHE is not set +# CONFIG_DM_ERA is not set +# CONFIG_DM_CLONE is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_DELAY is not set +# CONFIG_DM_DUST is not set +# CONFIG_DM_INIT is not set +# CONFIG_DM_UEVENT is not set +# CONFIG_DM_FLAKEY is not set +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +# CONFIG_DM_INTEGRITY is not set +# CONFIG_TARGET_CORE is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=y +# CONFIG_FUSION_SAS is not set +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +# CONFIG_FUSION_LOGGING is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +# CONFIG_IFB is not 
set +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=y +CONFIG_MACVTAP=y +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=y +# CONFIG_IPVTAP is not set +CONFIG_VXLAN=y +CONFIG_GENEVE=m +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set +CONFIG_TUN=y +CONFIG_TAP=y +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=y +CONFIG_VIRTIO_NET=y +CONFIG_NLMON=y +# CONFIG_NET_VRF is not set +# CONFIG_VSOCKMON is not set +# CONFIG_ARCNET is not set + +# +# Distributed Switch Architecture drivers +# +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=y +# CONFIG_LIQUIDIO is not set +# CONFIG_LIQUIDIO_VF is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_GEMINI_ETHERNET is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_FREESCALE is not set +CONFIG_NET_VENDOR_GOOGLE=y +# CONFIG_GVE is not set +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +# CONFIG_HNS3 is not set +CONFIG_NET_VENDOR_HUAWEI=y +# CONFIG_HINIC is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +# CONFIG_FM10K is not set +# CONFIG_IGC is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_CORE_GEN2=y +CONFIG_MLX5_CORE=m +# CONFIG_MLX5_FPGA is not set +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +# CONFIG_MLX5_CORE_IPOIB is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_MSCC_OCELOT_SWITCH is not set +# CONFIG_NET_VENDOR_MYRI is not set +# 
CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_ETHOC is not set +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +# CONFIG_8139TOO is not set +# CONFIG_R8169 is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_NET_VENDOR_SOCIONEXT=y +# CONFIG_SNI_NETSEC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=m +CONFIG_MDIO_BUS=m +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_HISI_FEMAC is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_OCTEON is not set +CONFIG_MDIO_THUNDER=m +CONFIG_PHYLIB=m +CONFIG_SWPHY=y + +# +# MII PHY device drivers +# +# CONFIG_ADIN_PHY is not set +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AX88796B_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +# CONFIG_DP83869_PHY is not set +CONFIG_FIXED_PHY=m +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_MARVELL_PHY is not set +# CONFIG_MARVELL_10G_PHY is not set +# CONFIG_MICREL_PHY is not set +CONFIG_MICROCHIP_PHY=m +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +# CONFIG_SLIP is not set +CONFIG_SLHC=m +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is 
not set +# CONFIG_USB_RTL8152 is not set +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=y +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +# CONFIG_USB_NET_CDCETHER is not set +# CONFIG_USB_NET_CDC_EEM is not set +# CONFIG_USB_NET_CDC_NCM is not set +# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set +# CONFIG_USB_NET_CDC_MBIM is not set +# CONFIG_USB_NET_DM9601 is not set +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +# CONFIG_USB_NET_SMSC75XX is not set +CONFIG_USB_NET_SMSC95XX=m +# CONFIG_USB_NET_GL620A is not set +# CONFIG_USB_NET_NET1080 is not set +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_MCS7830 is not set +# CONFIG_USB_NET_RNDIS_HOST is not set +# CONFIG_USB_NET_CDC_SUBSET is not set +# CONFIG_USB_NET_ZAURUS is not set +# CONFIG_USB_NET_CX82310_ETH is not set +# CONFIG_USB_NET_KALMIA is not set +# CONFIG_USB_NET_QMI_WWAN is not set +# CONFIG_USB_NET_INT51X1 is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_USB_SIERRA_NET is not set +# CONFIG_USB_NET_CH9200 is not set +# CONFIG_USB_NET_AQC111 is not set +CONFIG_WLAN=y +# CONFIG_WIRELESS_WDS is not set +CONFIG_WLAN_VENDOR_ADMTEK=y +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_WLAN_VENDOR_ATMEL=y +CONFIG_WLAN_VENDOR_BROADCOM=y +CONFIG_WLAN_VENDOR_CISCO=y +CONFIG_WLAN_VENDOR_INTEL=y +CONFIG_WLAN_VENDOR_INTERSIL=y +# CONFIG_HOSTAP is not set +# CONFIG_PRISM54 is not set +CONFIG_WLAN_VENDOR_MARVELL=y +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_WLAN_VENDOR_REALTEK=y +CONFIG_WLAN_VENDOR_RSI=y +CONFIG_WLAN_VENDOR_ST=y +CONFIG_WLAN_VENDOR_TI=y +CONFIG_WLAN_VENDOR_ZYDAS=y +CONFIG_WLAN_VENDOR_QUANTENNA=y + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_XEN_NETDEV_FRONTEND=y +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is 
not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MSM_VIBRATOR is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GP2A is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +# CONFIG_INPUT_HISI_POWERKEY is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_NULL_TTY is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=4 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +# CONFIG_SERIAL_8250_MANY_PORTS is not set +# CONFIG_SERIAL_8250_ASPEED_VUART is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +# CONFIG_SERIAL_8250_RSA is not set +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_BCM2835AUX=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set +CONFIG_SERIAL_SAMSUNG=y +CONFIG_SERIAL_SAMSUNG_UARTS_4=y +CONFIG_SERIAL_SAMSUNG_UARTS=4 +CONFIG_SERIAL_SAMSUNG_CONSOLE=y +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not 
set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_DEV_BUS=y +CONFIG_SERIAL_DEV_CTRL_TTYPORT=y +# CONFIG_TTY_PRINTK is not set +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +# CONFIG_HVC_DCC is not set +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_BCM2835=y +CONFIG_HW_RANDOM_IPROC_RNG200=y +CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_HW_RANDOM_HISI=y +CONFIG_HW_RANDOM_HISI_V2=y +CONFIG_HW_RANDOM_CAVIUM=y +CONFIG_HW_RANDOM_EXYNOS=y +# CONFIG_APPLICOM is not set +# CONFIG_RAW_DRIVER is not set +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +CONFIG_TCG_TIS=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_XEN=m +CONFIG_TCG_CRB=y +CONFIG_TCG_VTPM_PROXY=m +CONFIG_TCG_TIS_ST33ZP24=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# end of Character devices + +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_PINCTRL is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +CONFIG_I2C_BCM2835=m +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_EXYNOS5 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_IMX is not set +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_QUP is not set +# CONFIG_I2C_RK3X is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_SYNQUACER is not set +# CONFIG_I2C_VERSATILE is not set +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +# CONFIG_I2C_XLP9XX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers 
+# +# end of I2C Hardware Bus support + +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_SX150X is not set +# CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_OCELOT is not set +CONFIG_PINCTRL_BCM2835=y +# CONFIG_PINCTRL_APQ8064 is not set +# CONFIG_PINCTRL_APQ8084 is not set +# CONFIG_PINCTRL_IPQ4019 is not set +# CONFIG_PINCTRL_IPQ8064 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_MSM8660 is not set +# CONFIG_PINCTRL_MSM8960 is not set +# CONFIG_PINCTRL_MDM9615 is not set +# CONFIG_PINCTRL_MSM8X74 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8976 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +# CONFIG_PINCTRL_QCS404 is not set +# CONFIG_PINCTRL_QDF2XXX is not set +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_SC7180 is not set +# CONFIG_PINCTRL_SDM660 is not set +# CONFIG_PINCTRL_SDM845 is not set +# CONFIG_PINCTRL_SM8150 is not set +CONFIG_PINCTRL_SAMSUNG=y +CONFIG_PINCTRL_EXYNOS=y +CONFIG_PINCTRL_EXYNOS_ARM64=y +# CONFIG_PINCTRL_EQUILIBRIUM is not set +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +# CONFIG_GPIO_AMDPT is not set +CONFIG_GPIO_RASPBERRYPI_EXP=y +# CONFIG_GPIO_CADENCE is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_MPC8XXX is not set +# CONFIG_GPIO_PL061 is not set +# CONFIG_GPIO_SAMA5D2_PIOBU is not set +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +CONFIG_GPIO_THUNDERX=m +# CONFIG_GPIO_XGENE is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_XLP is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# 
CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# USB GPIO expanders +# +# end of USB GPIO expanders + +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_HISI is not set +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_RESET_VEXPRESS=y +# CONFIG_POWER_RESET_XGENE is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_ISP1704 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM1177 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DRIVETEMP is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX16065 is 
not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_PWM_FAN is not set +CONFIG_SENSORS_RASPBERRYPI_HWMON=y +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_TMP513 is not set +# CONFIG_SENSORS_VEXPRESS is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_GOV_FAIR_SHARE is not set +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is 
not set +# CONFIG_CPU_THERMAL is not set +# CONFIG_CLOCK_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +CONFIG_THERMAL_EMULATION=y +# CONFIG_THERMAL_MMIO is not set +CONFIG_HISI_THERMAL=y +# CONFIG_QORIQ_THERMAL is not set + +# +# Broadcom thermal drivers +# +# CONFIG_BCM2711_THERMAL is not set +# CONFIG_BCM2835_THERMAL is not set +# end of Broadcom thermal drivers + +# +# Samsung thermal drivers +# +CONFIG_EXYNOS_THERMAL=y +# end of Samsung thermal drivers + +# +# Qualcomm thermal drivers +# +# end of Qualcomm thermal drivers + +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_EXYNOS_LPASS is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set +# 
CONFIG_MFD_VX855 is not set +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD70528 is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_STPMIC1 is not set +# CONFIG_MFD_STMFX is not set +CONFIG_MFD_VEXPRESS_SYSREG=y +# CONFIG_RAVE_SP_CORE is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_VGA_ARB is not set +# CONFIG_DRM is not set + +# +# ARM devices +# +# end of ARM devices + +# +# ACP (Audio CoProcessor) Configuration +# +# end of ACP (Audio CoProcessor) Configuration + +CONFIG_DRM_RCAR_WRITEBACK=y +# CONFIG_DRM_XEN is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_ARMCLCD is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +CONFIG_XEN_FBDEV_FRONTEND=y +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_SIMPLE=y +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +# CONFIG_LCD_CLASS_DEVICE is not set +# CONFIG_BACKLIGHT_CLASS_DEVICE is not set +# end of Backlight & LCD device support + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +# CONFIG_LOGO is not set +# end of Graphics support + +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_APPLEIR is not set +# 
CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_BETOP_FF is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set +# end of USB HID support + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +CONFIG_USB_OTG=y +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_OTG_FSM is not set +CONFIG_USB_AUTOSUSPEND_DELAY=2 +# CONFIG_USB_MON is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=m +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=m +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=m +# CONFIG_USB_EHCI_ROOT_HUB_TT is not set +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=m +# CONFIG_USB_EHCI_FSL is not set +# CONFIG_USB_EHCI_EXYNOS is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is 
not set +# CONFIG_USB_FOTG210_HCD is not set +CONFIG_USB_OHCI_HCD=m +CONFIG_USB_OHCI_HCD_PCI=m +# CONFIG_USB_OHCI_EXYNOS is not set +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=m +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_CDNS3 is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +CONFIG_USB_DWC2=m +CONFIG_USB_DWC2_HOST=y + +# +# Gadget/Dual-role mode requires USB Gadget support to be enabled +# +# CONFIG_USB_DWC2_PCI is not set +# CONFIG_USB_DWC2_DEBUG is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set + +# +# USB Physical Layer drivers +# +CONFIG_USB_PHY=y +CONFIG_NOP_USB_XCEIV=m +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +# CONFIG_TYPEC is not set +# CONFIG_USB_ROLE_SWITCH is not set +CONFIG_MMC=m +CONFIG_PWRSEQ_EMMC=m +CONFIG_PWRSEQ_SIMPLE=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_ARMMMCI is not set +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +# CONFIG_MMC_SDHCI_PCI is not set +# CONFIG_MMC_SDHCI_ACPI is not set +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_ASPEED is not set +# 
CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_ESDHC is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MILBEAUT is not set +CONFIG_MMC_SDHCI_IPROC=m +# CONFIG_MMC_SDHCI_MSM is not set +# CONFIG_MMC_TIFM_SD is not set +# CONFIG_MMC_CB710 is not set +# CONFIG_MMC_VIA_SDMMC is not set +# CONFIG_MMC_CAVIUM_THUNDERX is not set +# CONFIG_MMC_DW is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MMC_USDHI6ROL0 is not set +# CONFIG_MMC_CQHCI is not set +# CONFIG_MMC_TOSHIBA_PCI is not set +CONFIG_MMC_BCM2835=m +# CONFIG_MMC_MTK is not set +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_MMC_SDHCI_OMAP is not set +# CONFIG_MMC_SDHCI_AM654 is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_ISL12026 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=y +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +CONFIG_RTC_DRV_EFI=y +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_FSL_FTM_ALARM=y +CONFIG_HAVE_S3C_RTC=y +CONFIG_RTC_DRV_S3C=y +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID 
Sensor RTC drivers +# +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=m +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +CONFIG_DMA_BCM2835=m +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_FSL_QDMA is not set +# CONFIG_HISI_DMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_PLX_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_QCOM_BAM_DMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_DW_EDMA_PCIE is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +# CONFIG_DMABUF_HEAPS is not set +# end of DMABUF options + +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +# CONFIG_UIO_PCI_GENERIC is not set +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# +# Xen driver support +# +CONFIG_XEN_BALLOON=y +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y +CONFIG_XEN_DEV_EVTCHN=y +# CONFIG_XEN_BACKEND is not set +CONFIG_XENFS=y +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +CONFIG_XEN_GNTDEV=y +CONFIG_XEN_GRANT_DEV_ALLOC=y +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +CONFIG_SWIOTLB_XEN=y +CONFIG_XEN_PVCALLS_FRONTEND=y +CONFIG_XEN_PRIVCMD=y +CONFIG_XEN_EFI=y +CONFIG_XEN_AUTO_XLATE=y +# end of Xen driver support + +# CONFIG_GREYBUS is not set +# CONFIG_STAGING is not set +# CONFIG_GOLDFISH is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_CHROME_PLATFORMS is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# CONFIG_CLK_HSDK is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_FSL_SAI is not set +# CONFIG_CLK_QORIQ is not set +CONFIG_CLK_LS1028A_PLLDIG=y +# CONFIG_COMMON_CLK_XGENE is not set +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +CONFIG_CLK_BCM2835=y +# CONFIG_CLK_RASPBERRYPI is not set +CONFIG_COMMON_CLK_HI3516CV300=y +# CONFIG_COMMON_CLK_HI3519 is not set +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3670=y +CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set 
+CONFIG_RESET_HISI=y +# CONFIG_STUB_CLK_HI3660 is not set +# CONFIG_COMMON_CLK_QCOM is not set +CONFIG_COMMON_CLK_SAMSUNG=y +CONFIG_EXYNOS_ARM64_COMMON_CLK=y +# CONFIG_EXYNOS_AUDSS_CLK_CON is not set +# end of Common Clock Framework + +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +CONFIG_CLKSRC_VERSATILE=y +# CONFIG_MICROCHIP_PIT64B is not set +# end of Clock Source drivers + +CONFIG_MAILBOX=y +# CONFIG_ARM_MHU is not set +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +# CONFIG_PCC is not set +# CONFIG_ALTERA_MBOX is not set +CONFIG_BCM2835_MBOX=y +# CONFIG_HI3660_MBOX is not set +# CONFIG_HI6220_MBOX is not set +# CONFIG_MAILBOX_TEST is not set +# CONFIG_QCOM_APCS_IPC is not set +# CONFIG_IOMMU_SUPPORT is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +CONFIG_BCM2835_POWER=y +CONFIG_RASPBERRYPI_POWER=y +# CONFIG_SOC_BRCMSTB is not set +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# CONFIG_FSL_DPAA is not set +# CONFIG_QUICC_ENGINE is not set +CONFIG_DPAA2_CONSOLE=y +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_AOSS_QMP is not set +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +# CONFIG_QCOM_OCMEM is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPMH is not set +# end of Qualcomm SoC drivers + +CONFIG_SOC_SAMSUNG=y +CONFIG_EXYNOS_CHIPID=y +CONFIG_EXYNOS_PMU=y +CONFIG_EXYNOS_PM_DOMAINS=y +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_ARM_EXYNOS_BUS_DEVFREQ is not set +# CONFIG_PM_DEVFREQ_EVENT is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_FSA9480 is not set +# CONFIG_EXTCON_GPIO is not set +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_PTN5150 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +CONFIG_PWM_BCM2835=m +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_SAMSUNG is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 
+CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +# CONFIG_AL_FIC is not set +CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_LS_EXTIRQ=y +CONFIG_LS_SCFG_MSI=y +CONFIG_PARTITION_PERCPU=y +# CONFIG_QCOM_IRQ_COMBINER is not set +# CONFIG_QCOM_PDC is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_BRCMSTB_RESCAL is not set +# CONFIG_RESET_INTEL_GW is not set +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_QCOM_PDC is not set +# CONFIG_RESET_TI_SYSCON is not set +CONFIG_COMMON_RESET_HI3660=y +# CONFIG_COMMON_RESET_HI6220 is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_PHY_XGENE is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_CADENCE_DP is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_FSL_IMX8MQ_USB is not set +# CONFIG_PHY_MIXEL_MIPI_DPHY is not set +# CONFIG_PHY_HI6220_USB is not set +# CONFIG_PHY_HI3660_USB is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_PCIE2 is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_UFS is not set +# CONFIG_PHY_EXYNOS_DP_VIDEO is not set +# CONFIG_PHY_EXYNOS_MIPI_VIDEO is not set +# CONFIG_PHY_EXYNOS_PCIE is not set +# CONFIG_PHY_SAMSUNG_USB2 is not set +# CONFIG_PHY_INTEL_EMMC is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_ARM_CCI_PMU=y +CONFIG_ARM_CCI400_PMU=y +CONFIG_ARM_CCI5xx_PMU=y +CONFIG_ARM_CCN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +# CONFIG_ARM_DSU_PMU is not set +# CONFIG_HISI_PMU is not set +# CONFIG_QCOM_L2_PMU is not set +# CONFIG_QCOM_L3_PMU is not set +# CONFIG_ARM_SPE_PMU is not set +# end of Performance monitor support + +# CONFIG_RAS is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX=y +# CONFIG_DEV_DAX is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y +# CONFIG_QCOM_QFPROM is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +# CONFIG_TEE is not set +CONFIG_PM_OPP=y +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_VALIDATE_FS_PARSER=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# 
CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_ALGS=y +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +# CONFIG_QFMT_V1 is not set +# CONFIG_QFMT_V2 is not set +CONFIG_QUOTACTL=y +# CONFIG_AUTOFS4_FS is not set +# CONFIG_AUTOFS_FS is not set +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +# CONFIG_VIRTIO_FS is not set +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=y +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=y +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="utf8" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +# CONFIG_NTFS_RW is not set +# end of DOS/FAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set 
+CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V3_ACL is not set +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +# CONFIG_NFSD_V4_SECURITY_LABEL is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set +# CONFIG_SUNRPC_DEBUG is not set +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS2 is not set +# CONFIG_CIFS_ALLOW_INSECURE_LEGACY is not set +# CONFIG_CIFS_UPCALL is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_FSCACHE=y +# CONFIG_CIFS_ROOT is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=y +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not 
set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_ENCRYPTED_KEYS=y +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_SECURITY_DMESG_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set +CONFIG_FORTIFY_SOURCE=y +CONFIG_STATIC_USERMODEHELPER=y +CONFIG_STATIC_USERMODEHELPER_PATH="/sbin/usermode-helper" +# CONFIG_SECURITY_SELINUX is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_WP512 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +# CONFIG_IMA_WRITE_POLICY is not set +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="yama,loadpin,safesetid,integrity" + +# +# Kernel hardening options +# +CONFIG_GCC_PLUGIN_STRUCTLEAK=y + +# +# Memory initialization +# +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set +CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y +# CONFIG_GCC_PLUGIN_STRUCTLEAK_VERBOSE is not set +CONFIG_GCC_PLUGIN_STACKLEAK=y +CONFIG_STACKLEAK_TRACK_MIN_SIZE=100 +# CONFIG_STACKLEAK_METRICS is not set +# CONFIG_STACKLEAK_RUNTIME_DISABLE is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set 
+CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_SIMD=y +CONFIG_CRYPTO_ENGINE=m + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECRDSA is not set +# CONFIG_CRYPTO_CURVE25519 is not set + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=y +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=y +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=y +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_KEYWRAP=y +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ESSIV=y + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_VMAC=y + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=y +CONFIG_CRYPTO_XXHASH=m +CONFIG_CRYPTO_BLAKE2B=m +# CONFIG_CRYPTO_BLAKE2S is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=y +CONFIG_CRYPTO_RMD128=y +CONFIG_CRYPTO_RMD160=y +CONFIG_CRYPTO_RMD256=y +CONFIG_CRYPTO_RMD320=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_TGR192=y +CONFIG_CRYPTO_WP512=y + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=y +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_BLOWFISH=y +CONFIG_CRYPTO_BLOWFISH_COMMON=y +CONFIG_CRYPTO_CAMELLIA=y +CONFIG_CRYPTO_CAST_COMMON=y +CONFIG_CRYPTO_CAST5=y +CONFIG_CRYPTO_CAST6=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_FCRYPT=y +CONFIG_CRYPTO_KHAZAD=y +CONFIG_CRYPTO_SALSA20=y +CONFIG_CRYPTO_CHACHA20=y +CONFIG_CRYPTO_SEED=y +CONFIG_CRYPTO_SERPENT=y +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_842=y +CONFIG_CRYPTO_LZ4=y +CONFIG_CRYPTO_LZ4HC=y +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +# CONFIG_CRYPTO_STATS is not set +CONFIG_CRYPTO_HASH_INFO=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=m +CONFIG_CRYPTO_LIB_BLAKE2S=m +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=y +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=y +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=y +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_FSL_CAAM is not set +# CONFIG_CRYPTO_DEV_EXYNOS_RNG is not set +# CONFIG_CRYPTO_DEV_S5P is not set +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# 
CONFIG_CRYPTO_DEV_CCP is not set +# CONFIG_CAVIUM_CPT is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +CONFIG_CRYPTO_DEV_QCE=m +CONFIG_CRYPTO_DEV_QCE_SKCIPHER=y +CONFIG_CRYPTO_DEV_QCE_SHA=y +CONFIG_CRYPTO_DEV_QCE_ENABLE_ALL=y +# CONFIG_CRYPTO_DEV_QCE_ENABLE_SKCIPHER is not set +# CONFIG_CRYPTO_DEV_QCE_ENABLE_SHA is not set +CONFIG_CRYPTO_DEV_QCOM_RNG=m +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +CONFIG_CRYPTO_DEV_HISI_SEC=m +# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +# CONFIG_CRYPTO_DEV_HISI_HPRE is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +# CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE is not set +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +# CONFIG_CORDIC is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +# CONFIG_INDIRECT_PIO is not set +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=y +CONFIG_842_DECOMPRESS=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=y +CONFIG_LZ4HC_COMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_SWIOTLB=y +CONFIG_DMA_NONCOHERENT_MMAP=y +CONFIG_DMA_REMAP=y +CONFIG_DMA_DIRECT_REMAP=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y 
+CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_SPLIT=y +CONFIG_SG_POOL=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_SPLIT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_BTF is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_FS=y +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +# CONFIG_UBSAN_SANITIZE_ALL is not set +CONFIG_UBSAN_NO_ALIGNMENT=y +# CONFIG_TEST_UBSAN is not set +# end of Generic Kernel Debugging Instruments + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_PAGE_POISONING=y +CONFIG_PAGE_POISONING_NO_SANITY=y +CONFIG_PAGE_POISONING_ZERO=y +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_CC_HAS_KASAN_GENERIC=y +# CONFIG_KASAN is not set +CONFIG_KASAN_STACK=1 +# end of Memory Debugging + +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_WQ_WATCHDOG=y +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +# CONFIG_SCHEDSTATS is not set +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set +CONFIG_DEBUG_PREEMPT=y + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Debug kernel data structures + +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_SAMPLES is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# arm64 Debugging +# +# CONFIG_PID_IN_CONTEXTIDR is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_WX is not set +# CONFIG_DEBUG_ALIGN_RODATA is not set +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set +# end of arm64 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y 
+CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage +# end of Kernel hacking diff --git a/kernel/config-5.6.x-s390x b/kernel/config-5.6.x-s390x new file mode 100644 index 000000000..92f864626 --- /dev/null +++ b/kernel/config-5.6.x-s390x @@ -0,0 +1,3715 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/s390 5.6.2 Kernel Configuration +# + +# +# Compiler: gcc (Alpine 8.3.0) 8.3.0 +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=80300 +CONFIG_CLANG_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y +CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-linuxkit" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_UNCOMPRESSED=y +# CONFIG_KERNEL_GZIP is not set +CONFIG_KERNEL_BZIP2=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_UNCOMPRESSED is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set 
+ +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CC_HAS_INT128=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +# CONFIG_BOOT_CONFIG is not set +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_HAVE_FUTEX_CMPXCHG=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +# CONFIG_USERFAULTFD is not set +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# end of General setup + +CONFIG_MMU=y +CONFIG_ZONE_DMA=y +CONFIG_CPU_BIG_ENDIAN=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_PGSTE=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_AUDIT_ARCH=y 
+CONFIG_NO_IOPORT_MAP=y +CONFIG_PCI_QUIRKS=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_S390=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_PGTABLE_LEVELS=5 +CONFIG_HAVE_LIVEPATCH=y + +# +# Processor type and features +# +CONFIG_TUNE_DEFAULT=y +CONFIG_64BIT=y +CONFIG_COMPAT=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_SMP=y +CONFIG_NR_CPUS=64 +CONFIG_HOTPLUG_CPU=y +# CONFIG_NUMA is not set +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_BOOK=y +CONFIG_SCHED_DRAWER=y +CONFIG_SCHED_TOPOLOGY=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_KEXEC=y +CONFIG_ARCH_RANDOM=y +CONFIG_KERNEL_NOBP=y +CONFIG_EXPOLINE=y +# CONFIG_EXPOLINE_OFF is not set +# CONFIG_EXPOLINE_AUTO is not set +CONFIG_EXPOLINE_FULL=y +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +# end of Processor type and features + +# +# Memory setup +# +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_FORCE_MAX_ZONEORDER=9 +CONFIG_MAX_PHYSMEM_BITS=46 +CONFIG_PACK_STACK=y +# CONFIG_WARN_DYNAMIC_STACK is not set +# end of Memory setup + +# +# I/O subsystem +# +CONFIG_QDIO=y +CONFIG_PCI_NR_FUNCTIONS=128 +CONFIG_HAS_IOMEM=y +CONFIG_CHSC_SCH=m +CONFIG_SCM_BUS=y +CONFIG_EADM_SCH=m +# end of I/O subsystem + +# +# Dump support +# +# CONFIG_CRASH_DUMP is not set +# end of Dump support + +CONFIG_SECCOMP=y + +# +# Power Management +# +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +# CONFIG_HIBERNATION is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# end of Power Management + +CONFIG_CCW=y +CONFIG_HAVE_PNETID=y + +# +# Virtualization +# +# CONFIG_PROTECTED_VIRTUALIZATION_GUEST is not set +CONFIG_PFAULT=y +# CONFIG_CMM is not set +# CONFIG_APPLDATA_BASE is not set +CONFIG_S390_HYPFS_FS=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_KVM_ASYNC_PF_SYNC=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_HAVE_KVM_INVALID_WAKEUPS=y +CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +# CONFIG_KVM_S390_UCONTROL is not set +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_S390_GUEST=y +# end of Virtualization + +# +# Selftests +# +# CONFIG_S390_UNWIND_SELFTEST is not set +# end of Selftests + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +# CONFIG_OPROFILE is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y 
+CONFIG_MMU_GATHER_NO_GATHER=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING=y +CONFIG_ARCH_HAS_SCALED_CPUTIME=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_CLONE_BACKWARDS2=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_OLD_SIGACTION=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_CPU_NO_EFFICIENT_FFS=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_PLUGIN_HOSTCC="g++" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_CYC_COMPLEXITY is not set +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_GCC_PLUGIN_RANDSTRUCT=y +CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE=y +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_REL_CRCS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_IBM_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y 
+CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_TRYLOCK=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_TRYLOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_TRYLOCK=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_TRYLOCK=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ELFCORE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=y +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_MEMBLOCK_PHYS_MAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_MEMORY_ISOLATION=y +# CONFIG_MEMORY_HOTPLUG is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +# CONFIG_MEM_SOFT_DIRTY is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +# 
CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=y +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_IUCV=y +CONFIG_AFIUCV=m +CONFIG_XDP_SOCKETS=y +# CONFIG_XDP_SOCKETS_DIAG is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +CONFIG_NET_IPIP=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_NET_IP_TUNNEL=y +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=y +CONFIG_NET_FOU=y +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +# CONFIG_INET_ESP_OFFLOAD is not set +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +CONFIG_INET_UDP_DIAG=y +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +# CONFIG_IPV6_OPTIMISTIC_DAD is not set +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +# CONFIG_INET6_ESP_OFFLOAD is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=y +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=y +CONFIG_IPV6_FOU_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +# CONFIG_IPV6_MROUTE is not set +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +# CONFIG_MPTCP is not set +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NETFILTER_NETLINK_OSF=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_LOG_COMMON=y +# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NETFILTER_CONNCOUNT=y +CONFIG_NF_CONNTRACK_MARK=y +# CONFIG_NF_CONNTRACK_SECMARK is not set +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y 
+CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_BROADCAST=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_SNMP=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NF_CT_NETLINK_TIMEOUT=y +CONFIG_NF_CT_NETLINK_HELPER=y +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_SIP=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=y +CONFIG_NF_TABLES=y +# CONFIG_NF_TABLES_SET is not set +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +# CONFIG_NFT_NUMGEN is not set +CONFIG_NFT_CT=y +CONFIG_NFT_COUNTER=y +CONFIG_NFT_CONNLIMIT=y +CONFIG_NFT_LOG=y +CONFIG_NFT_LIMIT=y +CONFIG_NFT_MASQ=y +CONFIG_NFT_REDIR=y +CONFIG_NFT_NAT=y +CONFIG_NFT_TUNNEL=y +# CONFIG_NFT_OBJREF is not set +CONFIG_NFT_QUEUE=y +# CONFIG_NFT_QUOTA is not set +CONFIG_NFT_REJECT=y +CONFIG_NFT_REJECT_INET=y +CONFIG_NFT_COMPAT=y +CONFIG_NFT_HASH=y +# CONFIG_NFT_XFRM is not set +# CONFIG_NFT_SOCKET is not set +CONFIG_NFT_OSF=y +CONFIG_NFT_TPROXY=y +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=y +CONFIG_NFT_DUP_NETDEV=y +CONFIG_NFT_FWD_NETDEV=y +# CONFIG_NF_FLOW_TABLE is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y +CONFIG_NETFILTER_XT_SET=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CT=y +CONFIG_NETFILTER_XT_TARGET_DSCP=y +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_HMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_NAT=y +CONFIG_NETFILTER_XT_TARGET_NETMAP=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_RATEEST=y +CONFIG_NETFILTER_XT_TARGET_REDIRECT=y +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_CGROUP=y +CONFIG_NETFILTER_XT_MATCH_CLUSTER=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_CPU=y +CONFIG_NETFILTER_XT_MATCH_DCCP=y +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ECN=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPCOMP=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_IPVS=y +CONFIG_NETFILTER_XT_MATCH_L2TP=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y 
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_NFACCT=y +CONFIG_NETFILTER_XT_MATCH_OSF=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_RATEEST=y +CONFIG_NETFILTER_XT_MATCH_REALM=y +CONFIG_NETFILTER_XT_MATCH_RECENT=y +CONFIG_NETFILTER_XT_MATCH_SCTP=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# end of Core Netfilter Configuration + +CONFIG_IP_SET=y +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=y +CONFIG_IP_SET_BITMAP_IPMAC=y +CONFIG_IP_SET_BITMAP_PORT=y +CONFIG_IP_SET_HASH_IP=y +# CONFIG_IP_SET_HASH_IPMARK is not set +CONFIG_IP_SET_HASH_IPPORT=y +CONFIG_IP_SET_HASH_IPPORTIP=y +CONFIG_IP_SET_HASH_IPPORTNET=y +# CONFIG_IP_SET_HASH_IPMAC is not set +# CONFIG_IP_SET_HASH_MAC is not set +# CONFIG_IP_SET_HASH_NETPORTNET is not set +CONFIG_IP_SET_HASH_NET=y +# CONFIG_IP_SET_HASH_NETNET is not set +CONFIG_IP_SET_HASH_NETPORT=y +CONFIG_IP_SET_HASH_NETIFACE=y +CONFIG_IP_SET_LIST_SET=y +CONFIG_IP_VS=y +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=y +CONFIG_IP_VS_WRR=y +CONFIG_IP_VS_LC=y +CONFIG_IP_VS_WLC=y +CONFIG_IP_VS_FO=y +CONFIG_IP_VS_OVF=y +CONFIG_IP_VS_LBLC=y +CONFIG_IP_VS_LBLCR=y +CONFIG_IP_VS_DH=y +CONFIG_IP_VS_SH=y +CONFIG_IP_VS_MH=y +CONFIG_IP_VS_SED=y +CONFIG_IP_VS_NQ=y + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=y +CONFIG_IP_VS_NFCT=y +# CONFIG_IP_VS_PE_SIP is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_NF_TPROXY_IPV4=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=y +CONFIG_NFT_DUP_IPV4=y +# CONFIG_NFT_FIB_IPV4 is not set +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=y +CONFIG_NF_LOG_ARP=y +CONFIG_NF_LOG_IPV4=y +CONFIG_NF_REJECT_IPV4=y +CONFIG_NF_NAT_SNMP_BASIC=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_SYNPROXY=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_TARGET_CLUSTERIP=y +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=y +CONFIG_NF_TPROXY_IPV6=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=y +CONFIG_NFT_DUP_IPV6=y +# CONFIG_NFT_FIB_IPV6 is not set +CONFIG_NF_DUP_IPV6=y +CONFIG_NF_REJECT_IPV6=y +CONFIG_NF_LOG_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y 
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_MATCH_RT=y +# CONFIG_IP6_NF_MATCH_SRH is not set +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_TARGET_SYNPROXY=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_IP6_NF_SECURITY=y +CONFIG_IP6_NF_NAT=y +CONFIG_IP6_NF_TARGET_MASQUERADE=y +CONFIG_IP6_NF_TARGET_NPT=y +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_TABLES_BRIDGE=y +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=y +CONFIG_NF_LOG_BRIDGE=y +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE_EBT_T_FILTER=y +CONFIG_BRIDGE_EBT_T_NAT=y +CONFIG_BRIDGE_EBT_802_3=y +CONFIG_BRIDGE_EBT_AMONG=y +CONFIG_BRIDGE_EBT_ARP=y +CONFIG_BRIDGE_EBT_IP=y +CONFIG_BRIDGE_EBT_IP6=y +CONFIG_BRIDGE_EBT_LIMIT=y +CONFIG_BRIDGE_EBT_MARK=y +CONFIG_BRIDGE_EBT_PKTTYPE=y +CONFIG_BRIDGE_EBT_STP=y +CONFIG_BRIDGE_EBT_VLAN=y +CONFIG_BRIDGE_EBT_ARPREPLY=y +CONFIG_BRIDGE_EBT_DNAT=y +CONFIG_BRIDGE_EBT_MARK_T=y +CONFIG_BRIDGE_EBT_REDIRECT=y +CONFIG_BRIDGE_EBT_SNAT=y +CONFIG_BRIDGE_EBT_LOG=y +CONFIG_BRIDGE_EBT_NFLOG=y +CONFIG_BPFILTER=y +CONFIG_BPFILTER_UMH=m +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_L2TP=m +# CONFIG_L2TP_DEBUGFS is not set +# CONFIG_L2TP_V3 is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=y +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_VLAN_8021Q_MVRP is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +# CONFIG_NET_SCH_CODEL is not set +# CONFIG_NET_SCH_FQ_CODEL is not set +# CONFIG_NET_SCH_CAKE is not set +# CONFIG_NET_SCH_FQ is not set +# CONFIG_NET_SCH_HHF is not set +# CONFIG_NET_SCH_PIE is not set +CONFIG_NET_SCH_INGRESS=m +# CONFIG_NET_SCH_PLUG is not set +# CONFIG_NET_SCH_ETS is not set +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=y +CONFIG_NET_CLS_TCINDEX=y +CONFIG_NET_CLS_ROUTE4=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=y +CONFIG_NET_CLS_RSVP6=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=y +# CONFIG_NET_CLS_FLOWER is not set +CONFIG_NET_CLS_MATCHALL=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=y 
+CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_EMATCH_IPSET=y +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_SAMPLE is not set +CONFIG_NET_ACT_IPT=y +CONFIG_NET_ACT_NAT=y +CONFIG_NET_ACT_PEDIT=y +CONFIG_NET_ACT_SIMP=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_NET_ACT_CSUM=y +# CONFIG_NET_ACT_MPLS is not set +# CONFIG_NET_ACT_VLAN is not set +CONFIG_NET_ACT_BPF=y +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_ACT_CT is not set +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=y +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# end of Network testing +# end of Networking options + +# CONFIG_CAN is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_FIB_RULES=y +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=y +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +# CONFIG_PCIEAER is not set +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_BW is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set +# CONFIG_HOTPLUG_PCI_S390 is not set + +# +# PCI controller drivers +# + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCI_MESON is not set +# end of DesignWare PCI Core Support + +# +# Cadence PCIe controllers support +# +# end of Cadence PCIe controllers support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch 
controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +# CONFIG_DEVTMPFS_MOUNT is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +# end of Generic Driver Options + +# +# Bus devices +# +# end of Bus devices + +# CONFIG_CONNECTOR is not set +# CONFIG_GNSS is not set +# CONFIG_MTD is not set +# CONFIG_OF is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +CONFIG_CDROM=y +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=y +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=y +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +CONFIG_ATA_OVER_ETH=m + +# +# S/390 block device drivers +# +CONFIG_BLK_DEV_XPRAM=m +CONFIG_DCSSBLK=m +CONFIG_DASD=y +CONFIG_DASD_PROFILE=y +CONFIG_DASD_ECKD=y +CONFIG_DASD_FBA=y +CONFIG_DASD_DIAG=y +CONFIG_DASD_EER=y +CONFIG_SCM_BLOCK=m +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +# CONFIG_BLK_DEV_NVME is not set +# CONFIG_NVME_FC is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC & related support +# +# CONFIG_VOP_BUS is not set +# end of Intel MIC & related support + +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_HABANA_AI is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_PROC_FS is not set + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# 
CONFIG_CHR_DEV_ST is not set +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=y +# CONFIG_CHR_DEV_SCH is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +# CONFIG_SCSI_FC_ATTRS is not set +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +CONFIG_SCSI_VIRTIO=y +# CONFIG_SCSI_DH is not set +# end of SCSI device support + +CONFIG_ATA=y +# CONFIG_ATA_VERBOSE_ERROR is not set +# CONFIG_SATA_PMP is not set + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# 
CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +CONFIG_PATA_SIS=y +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +CONFIG_ATA_GENERIC=y +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=y +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=y +CONFIG_DM_PERSISTENT_DATA=y +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=y +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_THIN_PROVISIONING=y +# CONFIG_DM_CACHE is not set +# CONFIG_DM_WRITECACHE is not set +# CONFIG_DM_ERA is not set +# CONFIG_DM_CLONE is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_DELAY is not set +# CONFIG_DM_DUST is not set +# CONFIG_DM_INIT is not set +# CONFIG_DM_UEVENT is not set +# CONFIG_DM_FLAKEY is not set +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +# CONFIG_DM_INTEGRITY is not set +# CONFIG_TARGET_CORE is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +# CONFIG_IFB is not set +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=y +CONFIG_MACVTAP=y +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=y +# CONFIG_IPVTAP is not set +CONFIG_VXLAN=y +CONFIG_GENEVE=m +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set +CONFIG_TUN=y +CONFIG_TAP=y +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=y +CONFIG_VIRTIO_NET=y +CONFIG_NLMON=y +# CONFIG_NET_VRF is not set +# CONFIG_VSOCKMON is not set +# CONFIG_ARCNET is not set +CONFIG_ETHERNET=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_AURORA is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set 
+# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_GOOGLE=y +# CONFIG_GVE is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_CORE_GEN2=y +CONFIG_MLX5_CORE=m +# CONFIG_MLX5_FPGA is not set +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +# CONFIG_MLX5_CORE_IPOIB is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_MSCC_OCELOT_SWITCH is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +CONFIG_NET_VENDOR_NI=y +# CONFIG_NI_XGE_MANAGEMENT_ENET is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_ETHOC is not set +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +CONFIG_MDIO_DEVICE=m +CONFIG_MDIO_BUS=m +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLIB=m + +# +# MII PHY device drivers +# +# CONFIG_ADIN_PHY is not set +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AX88796B_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +# CONFIG_DP83869_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_MARVELL_PHY is not set +# CONFIG_MARVELL_10G_PHY is not set +# CONFIG_MICREL_PHY is not set +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# 
CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +# CONFIG_SLIP is not set +CONFIG_SLHC=m + +# +# S/390 network device drivers +# +CONFIG_LCS=m +CONFIG_CTCM=m +CONFIG_NETIUCV=m +CONFIG_SMSGIUCV=m +CONFIG_SMSGIUCV_EVENT=m +CONFIG_QETH=y +CONFIG_QETH_L2=y +CONFIG_QETH_L3=y +CONFIG_CCWGROUP=y +# end of S/390 network device drivers + +# +# Host-side USB support is needed for USB Network Adapter support +# + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_VMXNET3 is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MSM_VIBRATOR is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_UINPUT=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# 
CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_NULL_TTY is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_UARTLITE is not set +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# end of Serial drivers + +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IUCV=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_HW_RANDOM_S390=y +# CONFIG_APPLICOM is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_HANGCHECK_TIMER is not set +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +CONFIG_DEVPORT=y + +# +# S/390 character device drivers +# +CONFIG_TN3270=y +CONFIG_TN3270_TTY=y +CONFIG_TN3270_FS=m +CONFIG_TN3270_CONSOLE=y +CONFIG_TN3215=y +CONFIG_TN3215_CONSOLE=y +CONFIG_CCW_CONSOLE=y +CONFIG_SCLP_TTY=y +CONFIG_SCLP_CONSOLE=y +CONFIG_SCLP_VT220_TTY=y +CONFIG_SCLP_VT220_CONSOLE=y +CONFIG_HMC_DRV=m +# CONFIG_SCLP_OFB is not set +CONFIG_S390_TAPE=m + +# +# S/390 tape hardware support +# +CONFIG_S390_TAPE_34XX=m +CONFIG_S390_TAPE_3590=m +CONFIG_VMLOGRDR=m +CONFIG_VMCP=y +CONFIG_VMCP_CMA_SIZE=4 +CONFIG_MONREADER=m +CONFIG_MONWRITER=m +CONFIG_S390_VMUR=m +# CONFIG_XILLYBUS is not set +# end of Character devices + +# CONFIG_RANDOM_TRUST_CPU is not set +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# end of I2C Hardware Bus support + +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C 
support + +# CONFIG_I3C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# end of PTP clock support + +# CONFIG_PINCTRL is not set +# CONFIG_GPIOLIB is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM1177 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DRIVETEMP is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM73 is not set +# 
CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_TMP513 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_GOV_FAIR_SHARE is not set +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_DEVFREQ_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# 
CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_DRM is not set + +# +# ARM devices +# +# end of ARM devices + +# +# ACP (Audio CoProcessor) Configuration +# +# end of ACP (Audio CoProcessor) Configuration + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +# CONFIG_LCD_CLASS_DEVICE is not set +# CONFIG_BACKLIGHT_CLASS_DEVICE is not set +# end of Backlight & LCD device support + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y 
+CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# end of Console display driver support + +# CONFIG_LOGO is not set +# end of Graphics support + +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +# CONFIG_DMADEVICES is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +# CONFIG_DMABUF_HEAPS is not set +# end of DMABUF options + +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +# CONFIG_UIO_PCI_GENERIC is not set +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +# CONFIG_VFIO is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y + +# +# Microsoft 
Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +# CONFIG_STAGING is not set +# CONFIG_GOLDFISH is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +# end of Clock Source drivers + +CONFIG_MAILBOX=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_S390_IOMMU=y +# CONFIG_S390_CCW_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_BRCMSTB_RESCAL is not set +# CONFIG_RESET_TI_SYSCON is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_INTEL_EMMC is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +# CONFIG_RAS is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +# CONFIG_DEV_DAX is not set +# CONFIG_NVMEM is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +CONFIG_PM_OPP=y +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_VALIDATE_FS_PARSER=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is 
not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_DAX_LIMITED=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_ALGS=y +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +# CONFIG_QFMT_V1 is not set +# CONFIG_QFMT_V2 is not set +CONFIG_QUOTACTL=y +# CONFIG_AUTOFS4_FS is not set +# CONFIG_AUTOFS_FS is not set +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +# CONFIG_VIRTIO_FS is not set +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=y +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_CACHEFILES=y +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="utf8" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +# CONFIG_NTFS_RW is not set +# end of DOS/FAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +# CONFIG_CONFIGFS_FS is not set +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# 
CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V3_ACL is not set +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +# CONFIG_NFSD_V4_SECURITY_LABEL is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set +# CONFIG_SUNRPC_DEBUG is not set +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS2 is not set +# CONFIG_CIFS_ALLOW_INSECURE_LEGACY is not set +# CONFIG_CIFS_UPCALL is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_FSCACHE=y +# CONFIG_CIFS_ROOT is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=y +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# 
CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_ENCRYPTED_KEYS=y +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_SECURITY_DMESG_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set +CONFIG_FORTIFY_SOURCE=y +CONFIG_STATIC_USERMODEHELPER=y +CONFIG_STATIC_USERMODEHELPER_PATH="/sbin/usermode-helper" +# CONFIG_SECURITY_SELINUX is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_WP512 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +# CONFIG_IMA_WRITE_POLICY is not set +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="yama,loadpin,safesetid,integrity" + +# +# Kernel hardening options +# +CONFIG_GCC_PLUGIN_STRUCTLEAK=y + +# +# Memory initialization +# +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set +CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y +# CONFIG_GCC_PLUGIN_STRUCTLEAK_VERBOSE is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set 
+CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_ENGINE=m + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECRDSA is not set +# CONFIG_CRYPTO_CURVE25519 is not set + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=y +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=y +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=y +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_KEYWRAP=y +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ESSIV=y + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_VMAC=y + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=y +CONFIG_CRYPTO_XXHASH=m +CONFIG_CRYPTO_BLAKE2B=m +# CONFIG_CRYPTO_BLAKE2S is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=y +CONFIG_CRYPTO_RMD128=y +CONFIG_CRYPTO_RMD160=y +CONFIG_CRYPTO_RMD256=y +CONFIG_CRYPTO_RMD320=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_TGR192=y +CONFIG_CRYPTO_WP512=y + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=y +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_BLOWFISH=y +CONFIG_CRYPTO_BLOWFISH_COMMON=y +CONFIG_CRYPTO_CAMELLIA=y +CONFIG_CRYPTO_CAST_COMMON=y +CONFIG_CRYPTO_CAST5=y +CONFIG_CRYPTO_CAST6=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_FCRYPT=y +CONFIG_CRYPTO_KHAZAD=y +CONFIG_CRYPTO_SALSA20=y +CONFIG_CRYPTO_CHACHA20=y +CONFIG_CRYPTO_SEED=y +CONFIG_CRYPTO_SERPENT=y +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_842=y +CONFIG_CRYPTO_LZ4=y +CONFIG_CRYPTO_LZ4HC=y +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +# CONFIG_CRYPTO_STATS is not set +CONFIG_CRYPTO_HASH_INFO=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=m +CONFIG_CRYPTO_LIB_BLAKE2S=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=y +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=y +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_HW=y +# CONFIG_ZCRYPT is not set +# CONFIG_CRYPTO_SHA1_S390 is not set +# CONFIG_CRYPTO_SHA256_S390 is not set +# CONFIG_CRYPTO_SHA512_S390 is not set +# CONFIG_CRYPTO_SHA3_256_S390 is not set +# CONFIG_CRYPTO_SHA3_512_S390 is not set +# CONFIG_CRYPTO_DES_S390 is not set +# CONFIG_CRYPTO_AES_S390 is not set 
+CONFIG_S390_PRNG=m +# CONFIG_CRYPTO_GHASH_S390 is not set +# CONFIG_CRYPTO_CRC32_S390 is not set +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +# CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE is not set +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +# CONFIG_CORDIC is not set +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=y +CONFIG_842_DECOMPRESS=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_ZLIB_DFLTCC=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=y +CONFIG_LZ4HC_COMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y +CONFIG_SWIOTLB=y +# CONFIG_DMA_CMA is not set +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_IOMMU_HELPER=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +# CONFIG_IRQ_POLL is not set +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_OID_REGISTRY=y +CONFIG_SG_POOL=y +CONFIG_ARCH_STACKWALK=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_DYNAMIC_DEBUG is not set +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_SPLIT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_BTF is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set 
+# CONFIG_HEADERS_INSTALL is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_FS=y +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +# CONFIG_UBSAN_SANITIZE_ALL is not set +CONFIG_UBSAN_NO_ALIGNMENT=y +# CONFIG_TEST_UBSAN is not set +# end of Generic Kernel Debugging Instruments + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_PAGE_POISONING=y +CONFIG_PAGE_POISONING_NO_SANITY=y +CONFIG_PAGE_POISONING_ZERO=y +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +# CONFIG_KASAN is not set +CONFIG_KASAN_STACK=1 +# end of Memory Debugging + +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_WQ_WATCHDOG=y +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +# CONFIG_SCHEDSTATS is not set +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Debug kernel data structures + +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_NOP_MCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_SAMPLES is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# s390 Debugging +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_S390_PTDUMP is not set +CONFIG_EARLY_PRINTK=y +# end of s390 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE 
is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage +# end of Kernel hacking diff --git a/kernel/config-5.6.x-x86_64 b/kernel/config-5.6.x-x86_64 new file mode 100644 index 000000000..d1e426ddd --- /dev/null +++ b/kernel/config-5.6.x-x86_64 @@ -0,0 +1,4699 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/x86 5.6.2 Kernel Configuration +# + +# +# Compiler: gcc (Alpine 8.3.0) 8.3.0 +# +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=80300 +CONFIG_CLANG_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_WARN_MAYBE_UNINITIALIZED=y +CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-linuxkit" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_PSI is not set +# end 
of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +# CONFIG_BOOT_CONFIG is not set +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PRINTK_NMI=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_BPF_SYSCALL=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +# CONFIG_USERFAULTFD is not set +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_RSEQ=y +# CONFIG_DEBUG_RSEQ is not set +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 
+CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_FILTER_PGPROT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ZONE_DMA32=y +CONFIG_AUDIT_ARCH=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_ZONE_DMA=y +CONFIG_SMP=y +CONFIG_X86_FEATURE_NAMES=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_RETPOLINE=y +# CONFIG_X86_CPU_RESCTRL is not set +# CONFIG_X86_EXTENDED_PLATFORM is not set +# CONFIG_X86_INTEL_LPSS is not set +# CONFIG_X86_AMD_PLATFORM_DEVICE is not set +# CONFIG_IOSF_MBI is not set +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_XXL=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_X86_HV_CALLBACK_VECTOR=y +CONFIG_XEN=y +CONFIG_XEN_PV=y +CONFIG_XEN_PV_SMP=y +CONFIG_XEN_DOM0=y +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_512GB=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +CONFIG_XEN_PVH=y +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +CONFIG_PVH=y +# CONFIG_KVM_DEBUG_FS is not set +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +# CONFIG_ACRN_GUEST is not set +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y +# CONFIG_PROCESSOR_SELECT is not set +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=512 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=128 +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +# CONFIG_X86_MCE is not set + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=y +CONFIG_PERF_EVENTS_INTEL_RAPL=y +CONFIG_PERF_EVENTS_INTEL_CSTATE=y +# CONFIG_PERF_EVENTS_AMD_POWER is not set +# end of Performance monitoring + +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_X86_IOPL_IOPERM=y +# CONFIG_I8K is not set +CONFIG_MICROCODE=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +# CONFIG_X86_CPA_STATISTICS is not set +# CONFIG_AMD_MEM_ENCRYPT is not set +# CONFIG_NUMA is not set +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +# CONFIG_ARCH_MEMORY_PROBE is not set 
+CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +# CONFIG_X86_PMEM_LEGACY is not set +# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set +CONFIG_X86_RESERVE_LOW=64 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_RANDOM=y +CONFIG_X86_SMAP=y +CONFIG_X86_UMIP=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_X86_INTEL_TSX_MODE_OFF=y +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +# CONFIG_X86_INTEL_TSX_MODE_AUTO is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y +CONFIG_SECCOMP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +# CONFIG_KEXEC is not set +# CONFIG_KEXEC_FILE is not set +# CONFIG_CRASH_DUMP is not set +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa +CONFIG_HOTPLUG_CPU=y +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +# CONFIG_COMPAT_VDSO is not set +# CONFIG_LEGACY_VSYSCALL_EMULATE is not set +# CONFIG_LEGACY_VSYSCALL_XONLY is not set +CONFIG_LEGACY_VSYSCALL_NONE=y +# CONFIG_CMDLINE_BOOL is not set +# CONFIG_MODIFY_LDT_SYSCALL is not set +CONFIG_HAVE_LIVEPATCH=y +# end of Processor type and features + +CONFIG_ARCH_HAS_ADD_PAGES=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y + +# +# Power management and ACPI options +# +# CONFIG_SUSPEND is not set +CONFIG_HIBERNATE_CALLBACKS=y +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_LPIT=y +# CONFIG_ACPI_PROCFS_POWER is not set +# CONFIG_ACPI_REV_OVERRIDE_POSSIBLE is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_PCI_SLOT is not set +CONFIG_ACPI_CONTAINER=y +# CONFIG_ACPI_HOTPLUG_MEMORY is not set +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=y +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +CONFIG_ACPI_NFIT=y +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +# CONFIG_ACPI_APEI_EINJ is not set +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_DPTF_POWER is not set +# CONFIG_PMIC_OPREGION is not 
set +# CONFIG_ACPI_CONFIGFS is not set +CONFIG_X86_PM_TIMER=y +# CONFIG_SFI is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +CONFIG_X86_PCC_CPUFREQ=y +CONFIG_X86_ACPI_CPUFREQ=y +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=y +# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_P4_CLOCKMOD=y + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=y +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +# CONFIG_CPU_IDLE_GOV_HALTPOLL is not set +CONFIG_HALTPOLL_CPUIDLE=y +# end of CPU Idle + +CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options + +# +# Bus options (PCI etc.) +# +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_MMCONF_FAM10H=y +# CONFIG_PCI_CNB20LE_QUIRK is not set +# CONFIG_ISA_BUS is not set +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# CONFIG_X86_SYSFB is not set +# end of Bus options (PCI etc.) + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32 is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_SYSVIPC_COMPAT=y +# end of Binary Emulations + +# +# Firmware Drivers +# +# CONFIG_EDD is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +# CONFIG_ISCSI_IBFT is not set +# CONFIG_FW_CFG_SYSFS is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_VARS=y +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_APPLE_PROPERTIES is not set +CONFIG_RESET_ATTACK_MITIGATION=y +# CONFIG_EFI_RCI2_TABLE is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y +CONFIG_EFI_EARLYCON=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_WERROR=y +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m +# CONFIG_KVM_MMU_AUDIT is not set +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# General architecture-dependent options 
+# +CONFIG_CRASH_CORE=y +CONFIG_HOTPLUG_SMT=y +CONFIG_OPROFILE=y +# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set +CONFIG_HAVE_OPROFILE=y +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_CC_HAS_STACKPROTECTOR_NONE=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_PLUGIN_HOSTCC="g++" +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_CYC_COMPLEXITY is not set +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_GCC_PLUGIN_RANDSTRUCT=y +CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE=y +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# 
CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=y +# CONFIG_BLK_DEV_ZONED is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_CMDLINE_PARSER=y +# CONFIG_BLK_WBT is not set +CONFIG_BLK_CGROUP_IOLATENCY=y +# CONFIG_BLK_CGROUP_IOCOST is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +CONFIG_CMDLINE_PARTITION=y +# end of Partition Types + +CONFIG_BLOCK_COMPAT=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_ASN1=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=y +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_VIRT_TO_BUS=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_TRANSPARENT_HUGEPAGE=y 
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_THP_SWAP=y +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_CMA is not set +# CONFIG_MEM_SOFT_DIRTY is not set +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DEVICE=y +CONFIG_DEV_PAGEMAP_OPS=y +# CONFIG_DEVICE_PRIVATE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=y +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +# CONFIG_XFRM_INTERFACE is not set +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_XDP_SOCKETS=y +# CONFIG_XDP_SOCKETS_DIAG is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +CONFIG_NET_IPIP=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_NET_IP_TUNNEL=y +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=y +CONFIG_NET_FOU=y +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +# CONFIG_INET_ESP_OFFLOAD is not set +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +CONFIG_INET_UDP_DIAG=y +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +# CONFIG_IPV6_OPTIMISTIC_DAD is not set +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +# CONFIG_INET6_ESP_OFFLOAD is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=y +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=y +CONFIG_IPV6_FOU_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +# CONFIG_IPV6_MROUTE is not set +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +# CONFIG_MPTCP is not set +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y 
+CONFIG_NETFILTER_NETLINK_ACCT=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NETFILTER_NETLINK_OSF=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_LOG_COMMON=y +# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NETFILTER_CONNCOUNT=y +CONFIG_NF_CONNTRACK_MARK=y +# CONFIG_NF_CONNTRACK_SECMARK is not set +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_BROADCAST=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_SNMP=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NF_CT_NETLINK_TIMEOUT=y +CONFIG_NF_CT_NETLINK_HELPER=y +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_SIP=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=y +CONFIG_NF_TABLES=y +# CONFIG_NF_TABLES_SET is not set +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +# CONFIG_NFT_NUMGEN is not set +CONFIG_NFT_CT=y +CONFIG_NFT_COUNTER=y +CONFIG_NFT_CONNLIMIT=y +CONFIG_NFT_LOG=y +CONFIG_NFT_LIMIT=y +CONFIG_NFT_MASQ=y +CONFIG_NFT_REDIR=y +CONFIG_NFT_NAT=y +CONFIG_NFT_TUNNEL=y +# CONFIG_NFT_OBJREF is not set +CONFIG_NFT_QUEUE=y +# CONFIG_NFT_QUOTA is not set +CONFIG_NFT_REJECT=y +CONFIG_NFT_REJECT_INET=y +CONFIG_NFT_COMPAT=y +CONFIG_NFT_HASH=y +# CONFIG_NFT_XFRM is not set +# CONFIG_NFT_SOCKET is not set +CONFIG_NFT_OSF=y +CONFIG_NFT_TPROXY=y +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=y +CONFIG_NFT_DUP_NETDEV=y +CONFIG_NFT_FWD_NETDEV=y +# CONFIG_NF_FLOW_TABLE is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y +CONFIG_NETFILTER_XT_SET=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CT=y +CONFIG_NETFILTER_XT_TARGET_DSCP=y +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_HMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_NAT=y +CONFIG_NETFILTER_XT_TARGET_NETMAP=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_RATEEST=y +CONFIG_NETFILTER_XT_TARGET_REDIRECT=y +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_SECMARK is not set +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_CGROUP=y +CONFIG_NETFILTER_XT_MATCH_CLUSTER=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_CPU=y 
+CONFIG_NETFILTER_XT_MATCH_DCCP=y +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ECN=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPCOMP=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_IPVS=y +CONFIG_NETFILTER_XT_MATCH_L2TP=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_NFACCT=y +CONFIG_NETFILTER_XT_MATCH_OSF=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_RATEEST=y +CONFIG_NETFILTER_XT_MATCH_REALM=y +CONFIG_NETFILTER_XT_MATCH_RECENT=y +CONFIG_NETFILTER_XT_MATCH_SCTP=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# end of Core Netfilter Configuration + +CONFIG_IP_SET=y +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=y +CONFIG_IP_SET_BITMAP_IPMAC=y +CONFIG_IP_SET_BITMAP_PORT=y +CONFIG_IP_SET_HASH_IP=y +# CONFIG_IP_SET_HASH_IPMARK is not set +CONFIG_IP_SET_HASH_IPPORT=y +CONFIG_IP_SET_HASH_IPPORTIP=y +CONFIG_IP_SET_HASH_IPPORTNET=y +# CONFIG_IP_SET_HASH_IPMAC is not set +# CONFIG_IP_SET_HASH_MAC is not set +# CONFIG_IP_SET_HASH_NETPORTNET is not set +CONFIG_IP_SET_HASH_NET=y +# CONFIG_IP_SET_HASH_NETNET is not set +CONFIG_IP_SET_HASH_NETPORT=y +CONFIG_IP_SET_HASH_NETIFACE=y +CONFIG_IP_SET_LIST_SET=y +CONFIG_IP_VS=y +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=y +CONFIG_IP_VS_WRR=y +CONFIG_IP_VS_LC=y +CONFIG_IP_VS_WLC=y +CONFIG_IP_VS_FO=y +CONFIG_IP_VS_OVF=y +CONFIG_IP_VS_LBLC=y +CONFIG_IP_VS_LBLCR=y +CONFIG_IP_VS_DH=y +CONFIG_IP_VS_SH=y +CONFIG_IP_VS_MH=y +CONFIG_IP_VS_SED=y +CONFIG_IP_VS_NQ=y + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=y +CONFIG_IP_VS_NFCT=y +# CONFIG_IP_VS_PE_SIP is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_NF_TPROXY_IPV4=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=y +CONFIG_NFT_DUP_IPV4=y +# CONFIG_NFT_FIB_IPV4 is not set +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=y +CONFIG_NF_LOG_ARP=y +CONFIG_NF_LOG_IPV4=y +CONFIG_NF_REJECT_IPV4=y +CONFIG_NF_NAT_SNMP_BASIC=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_SYNPROXY=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_TARGET_CLUSTERIP=y +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y 
+CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=y +CONFIG_NF_TPROXY_IPV6=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=y +CONFIG_NFT_DUP_IPV6=y +# CONFIG_NFT_FIB_IPV6 is not set +CONFIG_NF_DUP_IPV6=y +CONFIG_NF_REJECT_IPV6=y +CONFIG_NF_LOG_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_MATCH_RT=y +# CONFIG_IP6_NF_MATCH_SRH is not set +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_TARGET_SYNPROXY=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_IP6_NF_SECURITY=y +CONFIG_IP6_NF_NAT=y +CONFIG_IP6_NF_TARGET_MASQUERADE=y +CONFIG_IP6_NF_TARGET_NPT=y +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_TABLES_BRIDGE=y +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=y +CONFIG_NF_LOG_BRIDGE=y +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE_EBT_T_FILTER=y +CONFIG_BRIDGE_EBT_T_NAT=y +CONFIG_BRIDGE_EBT_802_3=y +CONFIG_BRIDGE_EBT_AMONG=y +CONFIG_BRIDGE_EBT_ARP=y +CONFIG_BRIDGE_EBT_IP=y +CONFIG_BRIDGE_EBT_IP6=y +CONFIG_BRIDGE_EBT_LIMIT=y +CONFIG_BRIDGE_EBT_MARK=y +CONFIG_BRIDGE_EBT_PKTTYPE=y +CONFIG_BRIDGE_EBT_STP=y +CONFIG_BRIDGE_EBT_VLAN=y +CONFIG_BRIDGE_EBT_ARPREPLY=y +CONFIG_BRIDGE_EBT_DNAT=y +CONFIG_BRIDGE_EBT_MARK_T=y +CONFIG_BRIDGE_EBT_REDIRECT=y +CONFIG_BRIDGE_EBT_SNAT=y +CONFIG_BRIDGE_EBT_LOG=y +CONFIG_BRIDGE_EBT_NFLOG=y +CONFIG_BPFILTER=y +CONFIG_BPFILTER_UMH=m +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +CONFIG_L2TP=m +# CONFIG_L2TP_DEBUGFS is not set +# CONFIG_L2TP_V3 is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=y +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_VLAN_8021Q_MVRP is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +# CONFIG_NET_SCH_CODEL is not set +# CONFIG_NET_SCH_FQ_CODEL is not set +# CONFIG_NET_SCH_CAKE is not set +# CONFIG_NET_SCH_FQ is not set +# CONFIG_NET_SCH_HHF is not set +# CONFIG_NET_SCH_PIE is not set 
+CONFIG_NET_SCH_INGRESS=m +# CONFIG_NET_SCH_PLUG is not set +# CONFIG_NET_SCH_ETS is not set +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=y +CONFIG_NET_CLS_TCINDEX=y +CONFIG_NET_CLS_ROUTE4=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=y +CONFIG_NET_CLS_RSVP6=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=y +# CONFIG_NET_CLS_FLOWER is not set +CONFIG_NET_CLS_MATCHALL=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_EMATCH_IPSET=y +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_SAMPLE is not set +CONFIG_NET_ACT_IPT=y +CONFIG_NET_ACT_NAT=y +CONFIG_NET_ACT_PEDIT=y +CONFIG_NET_ACT_SIMP=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_NET_ACT_CSUM=y +# CONFIG_NET_ACT_MPLS is not set +# CONFIG_NET_ACT_VLAN is not set +CONFIG_NET_ACT_BPF=y +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_ACT_CT is not set +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NETLINK_DIAG=y +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_XEN is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=y +CONFIG_ETHTOOL_NETLINK=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +# CONFIG_PCIEAER is not set +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# 
CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_BW is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_XEN_PCIDEV_FRONTEND=y +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_HYPERV is not set +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +# CONFIG_VMD is not set +CONFIG_PCI_HYPERV_INTERFACE=m + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCI_MESON is not set +# end of DesignWare PCI Core Support + +# +# Cadence PCIe controllers support +# +# end of Cadence PCIe controllers support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_PCCARD is not set +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +# CONFIG_DEVTMPFS_MOUNT is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +# end of Generic Driver Options + +# +# Bus devices +# +# end of Bus devices + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y +# CONFIG_GNSS is not set +# CONFIG_MTD is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=y +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_CRYPTOLOOP=y +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=y +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=65536 +# CONFIG_CDROM_PKTCDVD is not set +CONFIG_ATA_OVER_ETH=m +CONFIG_XEN_BLKDEV_FRONTEND=y +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y +# CONFIG_NVME_MULTIPATH is not set +# CONFIG_NVME_HWMON is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# 
CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set +# CONFIG_ALTERA_STAPL is not set +# CONFIG_INTEL_MEI is not set +# CONFIG_INTEL_MEI_ME is not set +# CONFIG_INTEL_MEI_TXE is not set +# CONFIG_VMWARE_VMCI is not set + +# +# Intel MIC & related support +# +# CONFIG_INTEL_MIC_BUS is not set +# CONFIG_SCIF_BUS is not set +# CONFIG_VOP_BUS is not set +# end of Intel MIC & related support + +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_HABANA_AI is not set +# end of Misc devices + +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_PROC_FS is not set + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=y +# CONFIG_CHR_DEV_SCH is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +# CONFIG_SCSI_FC_ATTRS is not set +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_VMWARE_PVSCSI=y +CONFIG_XEN_SCSI_FRONTEND=y +CONFIG_HYPERV_STORAGE=y +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_ISCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not 
set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +CONFIG_SCSI_VIRTIO=y +# CONFIG_SCSI_DH is not set +# end of SCSI device support + +CONFIG_ATA=y +# CONFIG_ATA_VERBOSE_ERROR is not set +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +# CONFIG_SATA_PMP is not set + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +CONFIG_SATA_MV=y +CONFIG_SATA_NV=y +CONFIG_SATA_PROMISE=y +CONFIG_SATA_SIL=y +CONFIG_SATA_SIS=y +CONFIG_SATA_SVW=y +CONFIG_SATA_ULI=y +CONFIG_SATA_VIA=y +CONFIG_SATA_VITESSE=y + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +CONFIG_PATA_SIS=y +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=y +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=y +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=y +CONFIG_DM_PERSISTENT_DATA=y +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=y +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_THIN_PROVISIONING=y +# CONFIG_DM_CACHE is not set +# CONFIG_DM_WRITECACHE is not set +# CONFIG_DM_ERA is not set +# CONFIG_DM_CLONE is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_DELAY is not set +# CONFIG_DM_DUST is not set +# CONFIG_DM_INIT is not set +# 
CONFIG_DM_UEVENT is not set +# CONFIG_DM_FLAKEY is not set +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +# CONFIG_DM_INTEGRITY is not set +# CONFIG_TARGET_CORE is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=y +# CONFIG_FUSION_SAS is not set +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +# CONFIG_FUSION_LOGGING is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +# CONFIG_IFB is not set +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=y +CONFIG_MACVTAP=y +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=y +# CONFIG_IPVTAP is not set +CONFIG_VXLAN=y +CONFIG_GENEVE=m +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set +CONFIG_TUN=y +CONFIG_TAP=y +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=y +CONFIG_VIRTIO_NET=y +CONFIG_NLMON=y +# CONFIG_NET_VRF is not set +# CONFIG_VSOCKMON is not set +# CONFIG_ARCNET is not set + +# +# Distributed Switch Architecture drivers +# +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_CX_ECAT is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_GVE=m +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +# CONFIG_FM10K is not set +# CONFIG_IGC is not set +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_CORE_GEN2=y +CONFIG_MLX5_CORE=m +# CONFIG_MLX5_FPGA is not set +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y 
+CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +# CONFIG_MLX5_CORE_IPOIB is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_MSCC_OCELOT_SWITCH is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_ETHOC is not set +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +# CONFIG_8139TOO is not set +# CONFIG_R8169 is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_THUNDER is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_ADIN_PHY is not set +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AX88796B_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_BCM84881_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +# CONFIG_DP83869_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_MARVELL_PHY is not set +# CONFIG_MARVELL_10G_PHY is not set +# CONFIG_MICREL_PHY is not set +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m 
+CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +# CONFIG_SLIP is not set +CONFIG_SLHC=m +# CONFIG_USB_NET_DRIVERS is not set +CONFIG_WLAN=y +# CONFIG_WIRELESS_WDS is not set +CONFIG_WLAN_VENDOR_ADMTEK=y +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_WLAN_VENDOR_ATMEL=y +CONFIG_WLAN_VENDOR_BROADCOM=y +CONFIG_WLAN_VENDOR_CISCO=y +CONFIG_WLAN_VENDOR_INTEL=y +CONFIG_WLAN_VENDOR_INTERSIL=y +# CONFIG_HOSTAP is not set +# CONFIG_PRISM54 is not set +CONFIG_WLAN_VENDOR_MARVELL=y +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_WLAN_VENDOR_REALTEK=y +CONFIG_WLAN_VENDOR_RSI=y +CONFIG_WLAN_VENDOR_ST=y +CONFIG_WLAN_VENDOR_TI=y +CONFIG_WLAN_VENDOR_ZYDAS=y +CONFIG_WLAN_VENDOR_QUANTENNA=y + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +CONFIG_XEN_NETDEV_FRONTEND=y +CONFIG_VMXNET3=y +# CONFIG_FUJITSU_ES is not set +CONFIG_HYPERV_NET=y +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MSM_VIBRATOR is not set +CONFIG_INPUT_PCSPKR=y +# CONFIG_INPUT_MMA8450 is not set +CONFIG_INPUT_ATLAS_BTNS=y +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_KXTJ9 is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +CONFIG_SERIO_PCIPS2=y +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=y +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not 
set +CONFIG_HYPERV_KEYBOARD=y +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +# CONFIG_NULL_TTY is not set +CONFIG_LDISC_AUTOLOAD=y +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set +CONFIG_SERIAL_8250_DWLIB=y +# CONFIG_SERIAL_8250_DW is not set +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +# CONFIG_SERIAL_8250_MID is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# end of Serial drivers + +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=y +CONFIG_HW_RANDOM_INTEL=y +CONFIG_HW_RANDOM_AMD=y +CONFIG_HW_RANDOM_VIA=y +CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_NVRAM=y +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +# CONFIG_RAW_DRIVER is not set +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +CONFIG_HPET_MMAP_DEFAULT=y +CONFIG_HANGCHECK_TIMER=y +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_XEN=m +CONFIG_TCG_CRB=y +CONFIG_TCG_VTPM_PROXY=m +CONFIG_TCG_TIS_ST33ZP24=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +# CONFIG_TELCLOCK is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set +# end of Character devices + +# CONFIG_RANDOM_TRUST_CPU is not set +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is 
not set +# CONFIG_I2C_ISMT is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +# end of I2C Hardware Bus support + +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +CONFIG_PTP_1588_CLOCK_KVM=y +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# end of PTP clock support + +# CONFIG_PINCTRL is not set +# CONFIG_GPIOLIB is not set +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_ABITUGURU is not set +# CONFIG_SENSORS_ABITUGURU3 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM1177 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_K8TEMP is not set +# CONFIG_SENSORS_K10TEMP is not set +# CONFIG_SENSORS_FAM15H_POWER is not set +# CONFIG_SENSORS_APPLESMC is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# 
CONFIG_SENSORS_DRIVETEMP is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_DELL_SMM is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_I5500 is not set +# CONFIG_SENSORS_CORETEMP is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_TMP513 is not set +# CONFIG_SENSORS_VIA_CPUTEMP is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# 
CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is not set +# CONFIG_SENSORS_ATK0110 is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +# CONFIG_THERMAL_HWMON is not set +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_GOV_FAIR_SHARE is not set +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_CLOCK_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# +# CONFIG_INTEL_POWERCLAMP is not set +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +# CONFIG_INT340X_THERMAL is not set +# end of ACPI INT340X thermal drivers + +# CONFIG_INTEL_PCH_THERMAL is not set +# end of Intel thermal drivers + +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_AS3711 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +CONFIG_LPC_ICH=y +CONFIG_LPC_SCH=y +# CONFIG_MFD_INTEL_LPSS_ACPI is not set +# CONFIG_MFD_INTEL_LPSS_PCI is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +CONFIG_MFD_SM501=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set 
+# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +CONFIG_MFD_WL1273_CORE=y +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +CONFIG_MFD_VX855=y +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_AGP is not set +# CONFIG_VGA_ARB is not set +# CONFIG_VGA_SWITCHEROO is not set +# CONFIG_DRM is not set + +# +# ARM devices +# +# end of ARM devices + +# +# ACP (Audio CoProcessor) Configuration +# +# end of ACP (Audio CoProcessor) Configuration + +# CONFIG_DRM_XEN is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_BOOT_VESA_SUPPORT=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +CONFIG_XEN_FBDEV_FRONTEND=y +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_HYPERV=y +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +# CONFIG_LCD_CLASS_DEVICE is not set +# CONFIG_BACKLIGHT_CLASS_DEVICE is not set +# end of Backlight & LCD device support + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +# CONFIG_VGACON_SOFT_SCROLLBACK is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +# CONFIG_LOGO is not set +# end of Graphics support + +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# 
CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_BETOP_FF is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_HYPERV_MOUSE is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set +# end of USB HID support + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support + +# +# Intel ISH HID support +# +# CONFIG_INTEL_ISH_HID is not set +# end of Intel ISH HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +# CONFIG_USB_ULPI_BUS is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +CONFIG_USB_AUTOSUSPEND_DELAY=2 +# CONFIG_USB_MON is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=m +# CONFIG_USB_XHCI_DBGCAP is not set 
+CONFIG_USB_XHCI_PCI=m +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=m +# CONFIG_USB_EHCI_ROOT_HUB_TT is not set +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=m +# CONFIG_USB_EHCI_FSL is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +CONFIG_USB_OHCI_HCD=m +CONFIG_USB_OHCI_HCD_PCI=m +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=m +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_CDNS3 is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +# CONFIG_TYPEC is not set +# CONFIG_USB_ROLE_SWITCH is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y 
+CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_DMADEVICES is not set + +# +# DMABUF options +# +# CONFIG_SYNC_FILE is not set +# CONFIG_DMABUF_HEAPS is not set +# end of DMABUF options + +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=m +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +# CONFIG_UIO_PCI_GENERIC is not set +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +# CONFIG_UIO_HV_GENERIC is not set +CONFIG_IRQ_BYPASS_MANAGER=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +# CONFIG_VIRTIO_PMEM is not set +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y + +# +# Microsoft Hyper-V guest support +# +CONFIG_HYPERV=y +CONFIG_HYPERV_TIMER=y +CONFIG_HYPERV_UTILS=y +CONFIG_HYPERV_BALLOON=y +# end of Microsoft Hyper-V guest support + +# +# Xen driver support +# +CONFIG_XEN_BALLOON=y +CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y +CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT=512 +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y +CONFIG_XEN_DEV_EVTCHN=y +# CONFIG_XEN_BACKEND is not set +CONFIG_XENFS=y +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +CONFIG_XEN_GNTDEV=y +CONFIG_XEN_GRANT_DEV_ALLOC=y +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +CONFIG_SWIOTLB_XEN=y +CONFIG_XEN_PVCALLS_FRONTEND=y +CONFIG_XEN_PRIVCMD=y +CONFIG_XEN_ACPI_PROCESSOR=y +CONFIG_XEN_HAVE_PVMMU=y +CONFIG_XEN_EFI=y 
+CONFIG_XEN_AUTO_XLATE=y +CONFIG_XEN_ACPI=y +# CONFIG_XEN_SYMS is not set +CONFIG_XEN_HAVE_VPMU=y +# end of Xen driver support + +# CONFIG_GREYBUS is not set +# CONFIG_STAGING is not set +CONFIG_X86_PLATFORM_DEVICES=y +# CONFIG_ACER_WIRELESS is not set +# CONFIG_ACERHDF is not set +# CONFIG_DCDBAS is not set +# CONFIG_DELL_SMBIOS is not set +# CONFIG_DELL_SMO8800 is not set +# CONFIG_DELL_RBU is not set +# CONFIG_FUJITSU_TABLET is not set +# CONFIG_GPD_POCKET_FAN is not set +# CONFIG_HP_ACCEL is not set +# CONFIG_HP_WIRELESS is not set +# CONFIG_SENSORS_HDAPS is not set +# CONFIG_INTEL_MENLOW is not set +# CONFIG_ASUS_WIRELESS is not set +# CONFIG_ACPI_WMI is not set +# CONFIG_TOPSTAR_LAPTOP is not set +# CONFIG_TOSHIBA_BT_RFKILL is not set +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_ACPI_CMPC is not set +# CONFIG_INTEL_HID_EVENT is not set +# CONFIG_INTEL_VBTN is not set +CONFIG_INTEL_IPS=y +# CONFIG_INTEL_PMC_CORE is not set +# CONFIG_IBM_RTL is not set +# CONFIG_SAMSUNG_Q10 is not set +# CONFIG_INTEL_RST is not set +# CONFIG_INTEL_SMARTCONNECT is not set +# CONFIG_INTEL_PMC_IPC is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +# CONFIG_INTEL_PUNIT_IPC is not set +# CONFIG_INTEL_TURBO_MAX_3 is not set +# CONFIG_I2C_MULTI_INSTANTIATE is not set +# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set + +# +# Intel Speed Select Technology interface support +# +# CONFIG_INTEL_SPEED_SELECT_INTERFACE is not set +# end of Intel Speed Select Technology interface support + +# CONFIG_SYSTEM76_ACPI is not set +CONFIG_PMC_ATOM=y +# CONFIG_MFD_CROS_EC is not set +# CONFIG_CHROME_PLATFORMS is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# end of Common Clock Framework + +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +# CONFIG_IOMMU_SUPPORT is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set +# CONFIG_DEVFREQ_GOV_USERSPACE is not set +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set 
+# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_BRCMSTB_RESCAL is not set +# CONFIG_RESET_TI_SYSCON is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_INTEL_EMMC is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +# CONFIG_RAS is not set +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# end of Android + +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=y +CONFIG_ND_BLK=y +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=y +CONFIG_BTT=y +CONFIG_ND_PFN=y +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_NVDIMM_KEYS=y +CONFIG_DAX_DRIVER=y +CONFIG_DAX=y +CONFIG_DEV_DAX=y +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_KMEM=y +CONFIG_DEV_DAX_PMEM_COMPAT=m +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_TEE is not set +CONFIG_PM_OPP=y +# CONFIG_UNISYS_VISORBUS is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_VALIDATE_FS_PARSER=y +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_ALGS=y +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +# CONFIG_QFMT_V1 is not set +# CONFIG_QFMT_V2 is not set +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +# CONFIG_AUTOFS4_FS is not set +# CONFIG_AUTOFS_FS is not set +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +# CONFIG_VIRTIO_FS is not set +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +# CONFIG_OVERLAY_FS_INDEX is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set + +# +# Caches +# +CONFIG_FSCACHE=y +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not 
set +CONFIG_CACHEFILES=y +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="utf8" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +# CONFIG_NTFS_RW is not set +# end of DOS/FAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_PROC_PID_ARCH_STATUS=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +# CONFIG_CONFIGFS_FS is not set +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFLATE_COMPRESS=y +# CONFIG_PSTORE_LZO_COMPRESS is not set +# CONFIG_PSTORE_LZ4_COMPRESS is not set +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set +# CONFIG_PSTORE_842_COMPRESS is not set +# CONFIG_PSTORE_ZSTD_COMPRESS is not set +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_DEFLATE_COMPRESS_DEFAULT=y +CONFIG_PSTORE_COMPRESS_DEFAULT="deflate" +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_EROFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V3_ACL is not set +CONFIG_NFSD_V4=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +# CONFIG_NFSD_SCSILAYOUT is not set +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +# CONFIG_NFSD_V4_SECURITY_LABEL is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y 
+CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set +# CONFIG_SUNRPC_DEBUG is not set +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS2 is not set +# CONFIG_CIFS_ALLOW_INSECURE_LEGACY is not set +# CONFIG_CIFS_UPCALL is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_FSCACHE=y +# CONFIG_CIFS_ROOT is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=y +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_ENCRYPTED_KEYS=y +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_SECURITY_DMESG_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_FALLBACK=y +# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set +CONFIG_FORTIFY_SOURCE=y +CONFIG_STATIC_USERMODEHELPER=y +CONFIG_STATIC_USERMODEHELPER_PATH="/sbin/usermode-helper" +# CONFIG_SECURITY_SELINUX is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# 
CONFIG_SECURITY_LOCKDOWN_LSM is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +# CONFIG_IMA_TEMPLATE is not set +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_WP512 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +# CONFIG_IMA_WRITE_POLICY is not set +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="yama,loadpin,safesetid,integrity" + +# +# Kernel hardening options +# +CONFIG_GCC_PLUGIN_STRUCTLEAK=y + +# +# Memory initialization +# +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set +CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y +# CONFIG_GCC_PLUGIN_STRUCTLEAK_VERBOSE is not set +CONFIG_GCC_PLUGIN_STACKLEAK=y +CONFIG_STACKLEAK_TRACK_MIN_SIZE=100 +# CONFIG_STACKLEAK_METRICS is not set +# CONFIG_STACKLEAK_RUNTIME_DISABLE is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_SIMD=y +CONFIG_CRYPTO_GLUE_HELPER_X86=y +CONFIG_CRYPTO_ENGINE=m + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECRDSA is not set +# CONFIG_CRYPTO_CURVE25519 is not set +CONFIG_CRYPTO_CURVE25519_X86=m + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=y +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=y +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=y +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_KEYWRAP=y +# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ESSIV=y + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_VMAC=y + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=y 
+CONFIG_CRYPTO_CRC32=y +CONFIG_CRYPTO_CRC32_PCLMUL=y +CONFIG_CRYPTO_XXHASH=m +CONFIG_CRYPTO_BLAKE2B=m +# CONFIG_CRYPTO_BLAKE2S is not set +CONFIG_CRYPTO_BLAKE2S_X86=m +CONFIG_CRYPTO_CRCT10DIF=y +# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_POLY1305=y +CONFIG_CRYPTO_POLY1305_X86_64=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=y +CONFIG_CRYPTO_RMD128=y +CONFIG_CRYPTO_RMD160=y +CONFIG_CRYPTO_RMD256=y +CONFIG_CRYPTO_RMD320=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_SSSE3=y +CONFIG_CRYPTO_SHA256_SSSE3=y +CONFIG_CRYPTO_SHA512_SSSE3=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_TGR192=y +CONFIG_CRYPTO_WP512=y +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=y + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_AES_NI_INTEL=y +CONFIG_CRYPTO_ANUBIS=y +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_BLOWFISH=y +CONFIG_CRYPTO_BLOWFISH_COMMON=y +CONFIG_CRYPTO_BLOWFISH_X86_64=y +CONFIG_CRYPTO_CAMELLIA=y +CONFIG_CRYPTO_CAMELLIA_X86_64=y +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=y +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=y +CONFIG_CRYPTO_CAST_COMMON=y +CONFIG_CRYPTO_CAST5=y +CONFIG_CRYPTO_CAST5_AVX_X86_64=y +CONFIG_CRYPTO_CAST6=y +CONFIG_CRYPTO_CAST6_AVX_X86_64=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DES3_EDE_X86_64=y +CONFIG_CRYPTO_FCRYPT=y +CONFIG_CRYPTO_KHAZAD=y +CONFIG_CRYPTO_SALSA20=y +CONFIG_CRYPTO_CHACHA20=y +CONFIG_CRYPTO_CHACHA20_X86_64=y +CONFIG_CRYPTO_SEED=y +CONFIG_CRYPTO_SERPENT=y +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=y +CONFIG_CRYPTO_SERPENT_AVX_X86_64=y +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=y +# CONFIG_CRYPTO_SM4 is not set +CONFIG_CRYPTO_TEA=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y +CONFIG_CRYPTO_TWOFISH_X86_64=y +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=y +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_842=y +CONFIG_CRYPTO_LZ4=y +CONFIG_CRYPTO_LZ4HC=y +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +# CONFIG_CRYPTO_STATS is not set +CONFIG_CRYPTO_HASH_INFO=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=m +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=m +CONFIG_CRYPTO_LIB_BLAKE2S=m +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=y +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=y +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=y +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=y +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=y +CONFIG_CRYPTO_DEV_PADLOCK_AES=y +CONFIG_CRYPTO_DEV_PADLOCK_SHA=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# CONFIG_CRYPTO_DEV_CCP is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is 
not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +# CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +# CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE is not set +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_TRUSTED_KEYRING is not set +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +# CONFIG_CORDIC is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC64 is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=y +CONFIG_842_DECOMPRESS=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=y +CONFIG_LZ4HC_COMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=m +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_SWIOTLB=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end 
of printk and dmesg options + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_SPLIT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_BTF is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_OPTIMIZE_INLINING=y +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_STACK_VALIDATION=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_FS=y +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +# CONFIG_UBSAN_SANITIZE_ALL is not set +CONFIG_UBSAN_NO_ALIGNMENT=y +# CONFIG_TEST_UBSAN is not set +# end of Generic Kernel Debugging Instruments + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_PAGE_POISONING=y +CONFIG_PAGE_POISONING_NO_SANITY=y +CONFIG_PAGE_POISONING_ZERO=y +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +# CONFIG_KASAN is not set +CONFIG_KASAN_STACK=1 +# end of Memory Debugging + +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_WQ_WATCHDOG=y +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +# CONFIG_SCHEDSTATS is not set +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Debug kernel data structures + +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +# CONFIG_PREEMPTIRQ_EVENTS is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +CONFIG_MMIOTRACE=y +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_HIST_TRIGGERS is not set +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_MMIOTRACE_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set +# CONFIG_SAMPLES is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +CONFIG_IO_STRICT_DEVMEM=y + +# +# x86 Debugging +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_X86_VERBOSE_BOOTUP=y +CONFIG_EARLY_PRINTK=y +# CONFIG_EARLY_PRINTK_DBGP is not set +# CONFIG_EARLY_PRINTK_USB_XDBC is not set +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_WX is not set +CONFIG_DOUBLEFAULT=y +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +# CONFIG_X86_DECODER_SELFTEST is not set +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +# CONFIG_DEBUG_BOOT_PARAMS is not set +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +# CONFIG_UNWINDER_GUESS is not set +# end of 
x86 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_KPROBES_SANITY_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_BITFIELD is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_MEMTEST is not set +# CONFIG_HYPERV_TESTING is not set +# end of Kernel Testing and Coverage +# end of Kernel hacking diff --git a/kernel/patches-5.6.x/0001-include-uapi-linux-swab-Fix-potentially-missing-__al.patch b/kernel/patches-5.6.x/0001-include-uapi-linux-swab-Fix-potentially-missing-__al.patch new file mode 100644 index 000000000..8fe10ddf8 --- /dev/null +++ b/kernel/patches-5.6.x/0001-include-uapi-linux-swab-Fix-potentially-missing-__al.patch @@ -0,0 +1,55 @@ +From 26ed515a9a0fa1c65c4ddada44e803ab67970bae Mon Sep 17 00:00:00 2001 +From: Matt Redfearn +Date: Wed, 3 Jan 2018 09:57:30 +0000 +Subject: [PATCH] include/uapi/linux/swab: Fix potentially missing + __always_inline +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Commit bc27fb68aaad ("include/uapi/linux/byteorder, swab: force inlining +of some byteswap operations") added __always_inline to swab functions +and commit 283d75737837 ("uapi/linux/stddef.h: Provide __always_inline to +userspace headers") added a definition of __always_inline for use in +exported headers when the kernel's compiler.h is not available. + +However, since swab.h does not include stddef.h, if the header soup does +not indirectly include it, the definition of __always_inline is missing, +resulting in a compilation failure, which was observed compiling the +perf tool using exported headers containing this commit: + +In file included from /usr/include/linux/byteorder/little_endian.h:12:0, + from /usr/include/asm/byteorder.h:14, + from tools/include/uapi/linux/perf_event.h:20, + from perf.h:8, + from builtin-bench.c:18: +/usr/include/linux/swab.h:160:8: error: unknown type name ‘__always_inline’ + static __always_inline __u16 __swab16p(const __u16 *p) + +Fix this by replacing the inclusion of linux/compiler.h with +linux/stddef.h to ensure that we pick up that definition if required, +without relying on it's indirect inclusion. 
compiler.h is then included
+indirectly, via stddef.h.
+
+Fixes: 283d75737837 ("uapi/linux/stddef.h: Provide __always_inline to userspace headers")
+Signed-off-by: Matt Redfearn
+---
+ include/uapi/linux/swab.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
+index 7272f85d6d6a..3736f2fe1541 100644
+--- a/include/uapi/linux/swab.h
++++ b/include/uapi/linux/swab.h
+@@ -3,7 +3,7 @@
+ #define _UAPI_LINUX_SWAB_H
+
+ #include <linux/types.h>
+-#include <linux/compiler.h>
++#include <linux/stddef.h>
+ #include <asm/bitsperlong.h>
+ #include <asm/swab.h>
+
+--
+2.24.0
+

From 42c51fab2bb273fe4f2a307ffde89361a71c1c8e Mon Sep 17 00:00:00 2001
From: Rolf Neugebauer
Date: Fri, 10 Apr 2020 11:25:40 +0100
Subject: [PATCH 4/8] kernel: Use in kernel WireGuard if present

5.6.x has WireGuard upstream. Skip pulling it for kernels where
WireGuard is present.

Signed-off-by: Rolf Neugebauer
---
 kernel/Dockerfile | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/kernel/Dockerfile b/kernel/Dockerfile
index 8823d3ac7..9ae47b95d 100644
--- a/kernel/Dockerfile
+++ b/kernel/Dockerfile
@@ -163,12 +163,14 @@ RUN case $(uname -m) in \
     cp System.map /out && \
     ([ -n "${DEBUG}" ] && cp vmlinux /out || true)

-# WireGuard
-RUN curl -fsSL -o /wireguard.tar.xz "${WIREGUARD_URL}" && \
-    echo "${WIREGUARD_SHA256} /wireguard.tar.xz" | sha256sum -c - && \
-    cp /wireguard.tar.xz /out/src/ && \
-    tar -C / --one-top-level=wireguard --strip-components=2 -xJf /wireguard.tar.xz "wireguard-linux-compat-${WIREGUARD_VERSION}/src" && \
-    make -j "$(getconf _NPROCESSORS_ONLN)" M="/wireguard" modules
+# WireGuard (skip kernels which have it in tree)
+RUN if [ ! -d /linux/drivers/net/wireguard ]; then \
+        curl -fsSL -o /wireguard.tar.xz "${WIREGUARD_URL}" && \
+        echo "${WIREGUARD_SHA256} /wireguard.tar.xz" | sha256sum -c - && \
+        cp /wireguard.tar.xz /out/src/ && \
+        tar -C / --one-top-level=wireguard --strip-components=2 -xJf /wireguard.tar.xz "wireguard-linux-compat-${WIREGUARD_VERSION}/src" && \
+        make -j "$(getconf _NPROCESSORS_ONLN)" M="/wireguard" modules; \
+    fi

 # Modules and Device Tree binaries
 RUN make INSTALL_MOD_PATH=/tmp/kernel-modules modules_install && \

From 0bfaa3becc4cb130e075f9b19d76c2c4c03de122 Mon Sep 17 00:00:00 2001
From: Rolf Neugebauer
Date: Fri, 10 Apr 2020 15:12:15 +0100
Subject: [PATCH 5/8] kernel: Don't install libunwind-dev on s390x

This is a partial revert of f49042545e27 ("libunwind-dev workaround on
x86 is no longer required")

Signed-off-by: Rolf Neugebauer
---
 kernel/Dockerfile | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/kernel/Dockerfile b/kernel/Dockerfile
index 9ae47b95d..3dd697ac5 100644
--- a/kernel/Dockerfile
+++ b/kernel/Dockerfile
@@ -16,7 +16,6 @@ RUN apk add \
     installkernel \
     kmod \
     elfutils-dev \
-    libunwind-dev \
     linux-headers \
     mpc1-dev \
     mpfr-dev \
@@ -31,6 +30,9 @@ RUN apk add \
     xz-dev \
     zlib-dev

+# libunwind-dev pkg is missing for s390x for now. Only install on other arch
+RUN [ $(uname -m) != s390x ] && apk add libunwind-dev || true
+
 ARG KERNEL_VERSION
 ARG KERNEL_SERIES
 ARG EXTRA

From 57d5ab8fdf5a1f674250d6f57d1145e13a854c61 Mon Sep 17 00:00:00 2001
From: Rolf Neugebauer
Date: Fri, 10 Apr 2020 15:56:20 +0100
Subject: [PATCH 6/8] kernel: Tweak s390x config

For some reason, running 'make ARCH=s390 oldconfig' yields a different
config when executed on a real s390x system...
Signed-off-by: Rolf Neugebauer --- kernel/config-5.6.x-s390x | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/kernel/config-5.6.x-s390x b/kernel/config-5.6.x-s390x index 92f864626..d6c45ee90 100644 --- a/kernel/config-5.6.x-s390x +++ b/kernel/config-5.6.x-s390x @@ -247,7 +247,29 @@ CONFIG_HAVE_LIVEPATCH=y # # Processor type and features # +CONFIG_HAVE_MARCH_Z900_FEATURES=y +CONFIG_HAVE_MARCH_Z990_FEATURES=y +CONFIG_HAVE_MARCH_Z9_109_FEATURES=y +CONFIG_HAVE_MARCH_Z10_FEATURES=y +CONFIG_HAVE_MARCH_Z196_FEATURES=y +# CONFIG_MARCH_Z900 is not set +# CONFIG_MARCH_Z990 is not set +# CONFIG_MARCH_Z9_109 is not set +# CONFIG_MARCH_Z10 is not set +CONFIG_MARCH_Z196=y +# CONFIG_MARCH_ZEC12 is not set +# CONFIG_MARCH_Z13 is not set +# CONFIG_MARCH_Z14 is not set +CONFIG_MARCH_Z196_TUNE=y CONFIG_TUNE_DEFAULT=y +# CONFIG_TUNE_Z900 is not set +# CONFIG_TUNE_Z990 is not set +# CONFIG_TUNE_Z9_109 is not set +# CONFIG_TUNE_Z10 is not set +# CONFIG_TUNE_Z196 is not set +# CONFIG_TUNE_ZEC12 is not set +# CONFIG_TUNE_Z13 is not set +# CONFIG_TUNE_Z14 is not set CONFIG_64BIT=y CONFIG_COMPAT=y CONFIG_SYSVIPC_COMPAT=y @@ -418,7 +440,6 @@ CONFIG_OLD_SIGSUSPEND3=y CONFIG_OLD_SIGACTION=y CONFIG_COMPAT_OLD_SIGACTION=y CONFIG_COMPAT_32BIT_TIME=y -CONFIG_CPU_NO_EFFICIENT_FFS=y CONFIG_HAVE_ARCH_VMAP_STACK=y CONFIG_VMAP_STACK=y CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y @@ -1235,6 +1256,7 @@ CONFIG_CGROUP_NET_PRIO=y CONFIG_CGROUP_NET_CLASSID=y CONFIG_NET_RX_BUSY_POLL=y CONFIG_BQL=y +# CONFIG_BPF_JIT is not set # CONFIG_BPF_STREAM_PARSER is not set CONFIG_NET_FLOW_LIMIT=y @@ -1270,6 +1292,7 @@ CONFIG_NET_DEVLINK=y CONFIG_PAGE_POOL=y CONFIG_FAILOVER=y CONFIG_ETHTOOL_NETLINK=y +CONFIG_HAVE_EBPF_JIT=y # # Device Drivers From 1e768f56eb1776872c8dabb8d2acd7f656fbb101 Mon Sep 17 00:00:00 2001 From: Rolf Neugebauer Date: Fri, 10 Apr 2020 18:04:19 +0100 Subject: [PATCH 7/8] Update YAMLs to latest kernels Signed-off-by: Rolf Neugebauer --- examples/aws.yml | 2 +- examples/azure.yml | 2 +- examples/cadvisor.yml | 2 +- examples/dm-crypt-loop.yml | 2 +- examples/dm-crypt.yml | 2 +- examples/docker-for-mac.yml | 2 +- examples/docker.yml | 2 +- examples/gcp.yml | 2 +- examples/getty.yml | 2 +- examples/hetzner.yml | 2 +- examples/hostmount-writeable-overlay.yml | 2 +- examples/influxdb-os.yml | 2 +- examples/logging.yml | 2 +- examples/minimal.yml | 2 +- examples/node_exporter.yml | 2 +- examples/openstack.yml | 2 +- examples/packet.arm64.yml | 2 +- examples/packet.yml | 2 +- examples/redis-os.yml | 2 +- examples/rt-for-vmware.yml | 2 +- examples/scaleway.yml | 2 +- examples/sshd.yml | 2 +- examples/static-ip.yml | 2 +- examples/swap.yml | 2 +- examples/tpm.yml | 2 +- examples/vmware.yml | 2 +- examples/vpnkit-forwarder.yml | 2 +- examples/vsudd-containerd.yml | 2 +- examples/vultr.yml | 2 +- examples/wireguard.yml | 2 +- linuxkit.yml | 2 +- projects/compose/compose-dynamic.yml | 2 +- projects/compose/compose-static.yml | 2 +- projects/miragesdk/examples/mirage-dhcp.yml | 2 +- test/cases/000_build/000_formats/test.yml | 2 +- test/cases/000_build/010_reproducible/test.yml | 2 +- .../010_platforms/000_qemu/000_run_kernel+initrd/test.yml | 2 +- .../010_platforms/000_qemu/005_run_kernel+squashfs/test.yml | 2 +- test/cases/010_platforms/000_qemu/010_run_iso/test.yml | 2 +- test/cases/010_platforms/000_qemu/020_run_efi/test.yml | 2 +- test/cases/010_platforms/000_qemu/030_run_qcow_bios/test.yml | 2 +- test/cases/010_platforms/000_qemu/040_run_raw_bios/test.yml | 2 +- 
test/cases/010_platforms/000_qemu/050_run_aws/test.yml | 2 +- test/cases/010_platforms/000_qemu/100_container/test.yml | 2 +- .../010_platforms/010_hyperkit/000_run_kernel+initrd/test.yml | 2 +- .../010_hyperkit/005_run_kernel+squashfs/test.yml | 2 +- test/cases/010_platforms/010_hyperkit/010_acpi/test.yml | 2 +- test/cases/010_platforms/110_gcp/000_run/test.yml | 2 +- test/cases/020_kernel/002_config_4.14.x/test.yml | 2 +- test/cases/020_kernel/005_config_4.19.x/test.yml | 2 +- test/cases/020_kernel/011_config_5.4.x/test.yml | 2 +- test/cases/020_kernel/102_kmod_4.14.x/Dockerfile | 2 +- test/cases/020_kernel/102_kmod_4.14.x/test.sh | 2 +- test/cases/020_kernel/102_kmod_4.14.x/test.yml | 2 +- test/cases/020_kernel/105_kmod_4.19.x/Dockerfile | 2 +- test/cases/020_kernel/105_kmod_4.19.x/test.sh | 2 +- test/cases/020_kernel/105_kmod_4.19.x/test.yml | 2 +- test/cases/020_kernel/111_kmod_5.4.x/Dockerfile | 2 +- test/cases/020_kernel/111_kmod_5.4.x/test.sh | 2 +- test/cases/020_kernel/111_kmod_5.4.x/test.yml | 2 +- test/cases/020_kernel/200_namespace/common.yml | 2 +- test/cases/030_security/000_docker-bench/test.yml | 2 +- test/cases/030_security/010_ports/test.yml | 2 +- test/cases/040_packages/002_binfmt/test.yml | 2 +- test/cases/040_packages/002_bpftrace/test.yml | 2 +- test/cases/040_packages/003_ca-certificates/test.yml | 2 +- test/cases/040_packages/003_containerd/test.yml | 2 +- test/cases/040_packages/004_dhcpcd/test.yml | 2 +- test/cases/040_packages/004_dm-crypt/000_simple/test.yml | 2 +- test/cases/040_packages/004_dm-crypt/001_luks/test.yml | 2 +- test/cases/040_packages/004_dm-crypt/002_key/test.yml | 2 +- test/cases/040_packages/005_extend/000_ext4/test-create.yml | 2 +- test/cases/040_packages/005_extend/000_ext4/test.yml | 2 +- test/cases/040_packages/005_extend/001_btrfs/test-create.yml | 2 +- test/cases/040_packages/005_extend/001_btrfs/test.yml | 2 +- test/cases/040_packages/005_extend/002_xfs/test-create.yml | 2 +- test/cases/040_packages/005_extend/002_xfs/test.yml | 2 +- test/cases/040_packages/006_format_mount/000_auto/test.yml | 2 +- .../cases/040_packages/006_format_mount/001_by_label/test.yml | 2 +- .../040_packages/006_format_mount/002_by_name/test.yml.in | 2 +- test/cases/040_packages/006_format_mount/003_btrfs/test.yml | 2 +- test/cases/040_packages/006_format_mount/004_xfs/test.yml | 2 +- .../006_format_mount/005_by_device_force/test.yml | 2 +- .../cases/040_packages/006_format_mount/010_multiple/test.yml | 2 +- test/cases/040_packages/007_getty-containerd/test.yml | 2 +- test/cases/040_packages/012_losetup/test.yml | 2 +- test/cases/040_packages/013_mkimage/mkimage.yml | 2 +- test/cases/040_packages/013_mkimage/run.yml | 2 +- test/cases/040_packages/019_sysctl/test.yml | 2 +- test/cases/040_packages/023_wireguard/test.yml | 2 +- test/cases/040_packages/030_logwrite/test.yml | 2 +- test/cases/040_packages/031_kmsg/test.yml | 2 +- test/cases/040_packages/032_bcc/test.yml | 4 ++-- test/hack/test-ltp.yml | 2 +- test/hack/test.yml | 2 +- test/pkg/ns/template.yml | 2 +- 96 files changed, 97 insertions(+), 97 deletions(-) diff --git a/examples/aws.yml b/examples/aws.yml index a94e39223..ad6e28674 100644 --- a/examples/aws.yml +++ b/examples/aws.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/azure.yml b/examples/azure.yml index 0190b07b9..6cb6ce0e1 100644 --- a/examples/azure.yml +++ b/examples/azure.yml @@ -1,5 +1,5 
@@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/cadvisor.yml b/examples/cadvisor.yml index f279b34b3..2e62746c8 100644 --- a/examples/cadvisor.yml +++ b/examples/cadvisor.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/dm-crypt-loop.yml b/examples/dm-crypt-loop.yml index 4d0e68776..f44cc3e69 100644 --- a/examples/dm-crypt-loop.yml +++ b/examples/dm-crypt-loop.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/dm-crypt.yml b/examples/dm-crypt.yml index 9bedf3459..b660ec3cf 100644 --- a/examples/dm-crypt.yml +++ b/examples/dm-crypt.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/docker-for-mac.yml b/examples/docker-for-mac.yml index 5f030651b..ac2004f40 100644 --- a/examples/docker-for-mac.yml +++ b/examples/docker-for-mac.yml @@ -1,6 +1,6 @@ # This is an example for building the open source components of Docker for Mac kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 page_poison=1" init: - linuxkit/vpnkit-expose-port:v0.7 # install vpnkit-expose-port and vpnkit-iptables-wrapper on host diff --git a/examples/docker.yml b/examples/docker.yml index ee46d0f0e..cdbf699de 100644 --- a/examples/docker.yml +++ b/examples/docker.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/gcp.yml b/examples/gcp.yml index 40ff20019..c3fa6eb15 100644 --- a/examples/gcp.yml +++ b/examples/gcp.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/getty.yml b/examples/getty.yml index d1f5a94c1..a38dd49a1 100644 --- a/examples/getty.yml +++ b/examples/getty.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/hetzner.yml b/examples/hetzner.yml index a7544bded..be9f26f5b 100644 --- a/examples/hetzner.yml +++ b/examples/hetzner.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: console=ttyS1 ucode: intel-ucode.cpio init: diff --git a/examples/hostmount-writeable-overlay.yml b/examples/hostmount-writeable-overlay.yml index 298c95125..562228601 100644 --- a/examples/hostmount-writeable-overlay.yml +++ b/examples/hostmount-writeable-overlay.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/influxdb-os.yml b/examples/influxdb-os.yml index 
085b09e0d..145889e72 100644 --- a/examples/influxdb-os.yml +++ b/examples/influxdb-os.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/logging.yml b/examples/logging.yml index 9e17d1892..eb54cb29a 100644 --- a/examples/logging.yml +++ b/examples/logging.yml @@ -1,6 +1,6 @@ # Simple example of using an external logging service kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/minimal.yml b/examples/minimal.yml index b0e76fca3..8c688c41e 100644 --- a/examples/minimal.yml +++ b/examples/minimal.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/node_exporter.yml b/examples/node_exporter.yml index 4b976a6e4..d7200f4d3 100644 --- a/examples/node_exporter.yml +++ b/examples/node_exporter.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/openstack.yml b/examples/openstack.yml index 51cfa40c3..2fbe83429 100644 --- a/examples/openstack.yml +++ b/examples/openstack.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/packet.arm64.yml b/examples/packet.arm64.yml index 33f346c90..79ae196fa 100644 --- a/examples/packet.arm64.yml +++ b/examples/packet.arm64.yml @@ -5,7 +5,7 @@ # for arm64 then the 'ucode' line in the kernel section can be left # out. 
kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyAMA0" ucode: "" onboot: diff --git a/examples/packet.yml b/examples/packet.yml index 67ae89590..11d0a8cdd 100644 --- a/examples/packet.yml +++ b/examples/packet.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: console=ttyS1 ucode: intel-ucode.cpio init: diff --git a/examples/redis-os.yml b/examples/redis-os.yml index d6828155b..3e11e2805 100644 --- a/examples/redis-os.yml +++ b/examples/redis-os.yml @@ -1,7 +1,7 @@ # Minimal YAML to run a redis server (used at DockerCon'17) # connect: nc localhost 6379 kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/rt-for-vmware.yml b/examples/rt-for-vmware.yml index f9cc7e076..a76ecf418 100644 --- a/examples/rt-for-vmware.yml +++ b/examples/rt-for-vmware.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:4.19.106-rt + image: linuxkit/kernel:5.4.28-rt cmdline: "console=tty0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/scaleway.yml b/examples/scaleway.yml index e99cbf06d..ee3fb8eaa 100644 --- a/examples/scaleway.yml +++ b/examples/scaleway.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0 root=/dev/vda" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/sshd.yml b/examples/sshd.yml index c27514206..d3c54c696 100644 --- a/examples/sshd.yml +++ b/examples/sshd.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/static-ip.yml b/examples/static-ip.yml index ec7befd2a..f642e58c5 100644 --- a/examples/static-ip.yml +++ b/examples/static-ip.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/swap.yml b/examples/swap.yml index 81bb46e54..915403cbb 100644 --- a/examples/swap.yml +++ b/examples/swap.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0 console=ttysclp0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/tpm.yml b/examples/tpm.yml index 1d83efecc..099379f56 100644 --- a/examples/tpm.yml +++ b/examples/tpm.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/vmware.yml b/examples/vmware.yml index 277aa8a21..02c324077 100644 --- a/examples/vmware.yml +++ b/examples/vmware.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/vpnkit-forwarder.yml b/examples/vpnkit-forwarder.yml index bbd36fe60..b41a43678 100644 --- a/examples/vpnkit-forwarder.yml +++ b/examples/vpnkit-forwarder.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: 
linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/vsudd-containerd.yml b/examples/vsudd-containerd.yml index 52bf8450b..ac9de7ab7 100644 --- a/examples/vsudd-containerd.yml +++ b/examples/vsudd-containerd.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/vultr.yml b/examples/vultr.yml index 336c2b4e5..7170415bf 100644 --- a/examples/vultr.yml +++ b/examples/vultr.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/examples/wireguard.yml b/examples/wireguard.yml index ad958dea0..23e32d380 100644 --- a/examples/wireguard.yml +++ b/examples/wireguard.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/linuxkit.yml b/linuxkit.yml index e7b81d599..ab7ef3264 100644 --- a/linuxkit.yml +++ b/linuxkit.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=tty0 console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/projects/compose/compose-dynamic.yml b/projects/compose/compose-dynamic.yml index 5309db186..50debdde8 100644 --- a/projects/compose/compose-dynamic.yml +++ b/projects/compose/compose-dynamic.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 page_poison=1" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/projects/compose/compose-static.yml b/projects/compose/compose-static.yml index 3f52d6fe5..675a6e655 100644 --- a/projects/compose/compose-static.yml +++ b/projects/compose/compose-static.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 page_poison=1" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/projects/miragesdk/examples/mirage-dhcp.yml b/projects/miragesdk/examples/mirage-dhcp.yml index e3c8e2973..478c394c6 100644 --- a/projects/miragesdk/examples/mirage-dhcp.yml +++ b/projects/miragesdk/examples/mirage-dhcp.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 page_poison=1" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/000_build/000_formats/test.yml b/test/cases/000_build/000_formats/test.yml index 5df85411d..af81c2f64 100644 --- a/test/cases/000_build/000_formats/test.yml +++ b/test/cases/000_build/000_formats/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/000_build/010_reproducible/test.yml b/test/cases/000_build/010_reproducible/test.yml index c7dff614d..e5c137185 100644 --- a/test/cases/000_build/010_reproducible/test.yml +++ b/test/cases/000_build/010_reproducible/test.yml @@ -1,6 +1,6 @@ # NOTE: Images build from this file likely do not run kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - 
linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/000_qemu/000_run_kernel+initrd/test.yml b/test/cases/010_platforms/000_qemu/000_run_kernel+initrd/test.yml index 330d2fa2d..7330dddc3 100644 --- a/test/cases/010_platforms/000_qemu/000_run_kernel+initrd/test.yml +++ b/test/cases/010_platforms/000_qemu/000_run_kernel+initrd/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/000_qemu/005_run_kernel+squashfs/test.yml b/test/cases/010_platforms/000_qemu/005_run_kernel+squashfs/test.yml index 330d2fa2d..7330dddc3 100644 --- a/test/cases/010_platforms/000_qemu/005_run_kernel+squashfs/test.yml +++ b/test/cases/010_platforms/000_qemu/005_run_kernel+squashfs/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/000_qemu/010_run_iso/test.yml b/test/cases/010_platforms/000_qemu/010_run_iso/test.yml index 691f39e47..dfccd9793 100644 --- a/test/cases/010_platforms/000_qemu/010_run_iso/test.yml +++ b/test/cases/010_platforms/000_qemu/010_run_iso/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/000_qemu/020_run_efi/test.yml b/test/cases/010_platforms/000_qemu/020_run_efi/test.yml index ebffa1ae3..abdfff6f6 100644 --- a/test/cases/010_platforms/000_qemu/020_run_efi/test.yml +++ b/test/cases/010_platforms/000_qemu/020_run_efi/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/000_qemu/030_run_qcow_bios/test.yml b/test/cases/010_platforms/000_qemu/030_run_qcow_bios/test.yml index ebffa1ae3..abdfff6f6 100644 --- a/test/cases/010_platforms/000_qemu/030_run_qcow_bios/test.yml +++ b/test/cases/010_platforms/000_qemu/030_run_qcow_bios/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/000_qemu/040_run_raw_bios/test.yml b/test/cases/010_platforms/000_qemu/040_run_raw_bios/test.yml index ebffa1ae3..abdfff6f6 100644 --- a/test/cases/010_platforms/000_qemu/040_run_raw_bios/test.yml +++ b/test/cases/010_platforms/000_qemu/040_run_raw_bios/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/000_qemu/050_run_aws/test.yml b/test/cases/010_platforms/000_qemu/050_run_aws/test.yml index ebffa1ae3..abdfff6f6 100644 --- a/test/cases/010_platforms/000_qemu/050_run_aws/test.yml +++ b/test/cases/010_platforms/000_qemu/050_run_aws/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/000_qemu/100_container/test.yml b/test/cases/010_platforms/000_qemu/100_container/test.yml index 
f8849e0a4..6c170d126 100644 --- a/test/cases/010_platforms/000_qemu/100_container/test.yml +++ b/test/cases/010_platforms/000_qemu/100_container/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/010_hyperkit/000_run_kernel+initrd/test.yml b/test/cases/010_platforms/010_hyperkit/000_run_kernel+initrd/test.yml index ebffa1ae3..abdfff6f6 100644 --- a/test/cases/010_platforms/010_hyperkit/000_run_kernel+initrd/test.yml +++ b/test/cases/010_platforms/010_hyperkit/000_run_kernel+initrd/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/010_hyperkit/005_run_kernel+squashfs/test.yml b/test/cases/010_platforms/010_hyperkit/005_run_kernel+squashfs/test.yml index ebffa1ae3..abdfff6f6 100644 --- a/test/cases/010_platforms/010_hyperkit/005_run_kernel+squashfs/test.yml +++ b/test/cases/010_platforms/010_hyperkit/005_run_kernel+squashfs/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/010_hyperkit/010_acpi/test.yml b/test/cases/010_platforms/010_hyperkit/010_acpi/test.yml index 063e8966b..9587280a7 100644 --- a/test/cases/010_platforms/010_hyperkit/010_acpi/test.yml +++ b/test/cases/010_platforms/010_hyperkit/010_acpi/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/010_platforms/110_gcp/000_run/test.yml b/test/cases/010_platforms/110_gcp/000_run/test.yml index 82fed9b6e..b07c43b4a 100644 --- a/test/cases/010_platforms/110_gcp/000_run/test.yml +++ b/test/cases/010_platforms/110_gcp/000_run/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/020_kernel/002_config_4.14.x/test.yml b/test/cases/020_kernel/002_config_4.14.x/test.yml index 72ce49295..f3936c194 100644 --- a/test/cases/020_kernel/002_config_4.14.x/test.yml +++ b/test/cases/020_kernel/002_config_4.14.x/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:4.14.174 + image: linuxkit/kernel:4.14.175 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/020_kernel/005_config_4.19.x/test.yml b/test/cases/020_kernel/005_config_4.19.x/test.yml index 806745d34..3c117cb94 100644 --- a/test/cases/020_kernel/005_config_4.19.x/test.yml +++ b/test/cases/020_kernel/005_config_4.19.x/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:4.19.113 + image: linuxkit/kernel:4.19.114 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/020_kernel/011_config_5.4.x/test.yml b/test/cases/020_kernel/011_config_5.4.x/test.yml index 41e852801..3077100ee 100644 --- a/test/cases/020_kernel/011_config_5.4.x/test.yml +++ b/test/cases/020_kernel/011_config_5.4.x/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" 
init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/020_kernel/102_kmod_4.14.x/Dockerfile b/test/cases/020_kernel/102_kmod_4.14.x/Dockerfile index 360aa5bc1..99fe12b0e 100644 --- a/test/cases/020_kernel/102_kmod_4.14.x/Dockerfile +++ b/test/cases/020_kernel/102_kmod_4.14.x/Dockerfile @@ -3,7 +3,7 @@ # In the last stage, it creates a package, which can be used for # testing. -FROM linuxkit/kernel:4.14.174 AS ksrc +FROM linuxkit/kernel:4.14.175 AS ksrc # Extract headers and compile module FROM linuxkit/alpine:3fdc49366257e53276c6f363956a4353f95d9a81 AS build diff --git a/test/cases/020_kernel/102_kmod_4.14.x/test.sh b/test/cases/020_kernel/102_kmod_4.14.x/test.sh index 344fe8594..b79378b5a 100644 --- a/test/cases/020_kernel/102_kmod_4.14.x/test.sh +++ b/test/cases/020_kernel/102_kmod_4.14.x/test.sh @@ -19,7 +19,7 @@ clean_up() { trap clean_up EXIT # Make sure we have the latest kernel image -docker pull linuxkit/kernel:4.14.174 +docker pull linuxkit/kernel:4.14.175 # Build a package docker build -t ${IMAGE_NAME} . diff --git a/test/cases/020_kernel/102_kmod_4.14.x/test.yml b/test/cases/020_kernel/102_kmod_4.14.x/test.yml index 3a1e55130..05ba661ae 100644 --- a/test/cases/020_kernel/102_kmod_4.14.x/test.yml +++ b/test/cases/020_kernel/102_kmod_4.14.x/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:4.14.174 + image: linuxkit/kernel:4.14.175 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/020_kernel/105_kmod_4.19.x/Dockerfile b/test/cases/020_kernel/105_kmod_4.19.x/Dockerfile index 312bcbdbb..db9fd3fb7 100644 --- a/test/cases/020_kernel/105_kmod_4.19.x/Dockerfile +++ b/test/cases/020_kernel/105_kmod_4.19.x/Dockerfile @@ -3,7 +3,7 @@ # In the last stage, it creates a package, which can be used for # testing. -FROM linuxkit/kernel:4.19.113 AS ksrc +FROM linuxkit/kernel:4.19.114 AS ksrc # Extract headers and compile module FROM linuxkit/alpine:3fdc49366257e53276c6f363956a4353f95d9a81 AS build diff --git a/test/cases/020_kernel/105_kmod_4.19.x/test.sh b/test/cases/020_kernel/105_kmod_4.19.x/test.sh index 956b53a3a..ea51c912a 100644 --- a/test/cases/020_kernel/105_kmod_4.19.x/test.sh +++ b/test/cases/020_kernel/105_kmod_4.19.x/test.sh @@ -19,7 +19,7 @@ clean_up() { trap clean_up EXIT # Make sure we have the latest kernel image -docker pull linuxkit/kernel:4.19.113 +docker pull linuxkit/kernel:4.19.114 # Build a package docker build -t ${IMAGE_NAME} . diff --git a/test/cases/020_kernel/105_kmod_4.19.x/test.yml b/test/cases/020_kernel/105_kmod_4.19.x/test.yml index 9c111ea44..02b322326 100644 --- a/test/cases/020_kernel/105_kmod_4.19.x/test.yml +++ b/test/cases/020_kernel/105_kmod_4.19.x/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:4.19.113 + image: linuxkit/kernel:4.19.114 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/020_kernel/111_kmod_5.4.x/Dockerfile b/test/cases/020_kernel/111_kmod_5.4.x/Dockerfile index f5400b264..2edba79ce 100644 --- a/test/cases/020_kernel/111_kmod_5.4.x/Dockerfile +++ b/test/cases/020_kernel/111_kmod_5.4.x/Dockerfile @@ -3,7 +3,7 @@ # In the last stage, it creates a package, which can be used for # testing. 
-FROM linuxkit/kernel:5.4.28 AS ksrc +FROM linuxkit/kernel:5.4.30 AS ksrc # Extract headers and compile module FROM linuxkit/alpine:3fdc49366257e53276c6f363956a4353f95d9a81 AS build diff --git a/test/cases/020_kernel/111_kmod_5.4.x/test.sh b/test/cases/020_kernel/111_kmod_5.4.x/test.sh index 8df237339..0babc8144 100644 --- a/test/cases/020_kernel/111_kmod_5.4.x/test.sh +++ b/test/cases/020_kernel/111_kmod_5.4.x/test.sh @@ -19,7 +19,7 @@ clean_up() { trap clean_up EXIT # Make sure we have the latest kernel image -docker pull linuxkit/kernel:5.4.28 +docker pull linuxkit/kernel:5.4.30 # Build a package docker build -t ${IMAGE_NAME} . diff --git a/test/cases/020_kernel/111_kmod_5.4.x/test.yml b/test/cases/020_kernel/111_kmod_5.4.x/test.yml index 5eae59186..292e4b7a8 100644 --- a/test/cases/020_kernel/111_kmod_5.4.x/test.yml +++ b/test/cases/020_kernel/111_kmod_5.4.x/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/020_kernel/200_namespace/common.yml b/test/cases/020_kernel/200_namespace/common.yml index 81fecb7c3..e5e1f93f9 100644 --- a/test/cases/020_kernel/200_namespace/common.yml +++ b/test/cases/020_kernel/200_namespace/common.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/030_security/000_docker-bench/test.yml b/test/cases/030_security/000_docker-bench/test.yml index 87a833349..9c3fb188b 100644 --- a/test/cases/030_security/000_docker-bench/test.yml +++ b/test/cases/030_security/000_docker-bench/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/030_security/010_ports/test.yml b/test/cases/030_security/010_ports/test.yml index 7a03cde6a..de179a439 100644 --- a/test/cases/030_security/010_ports/test.yml +++ b/test/cases/030_security/010_ports/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 page_poison=1" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/002_binfmt/test.yml b/test/cases/040_packages/002_binfmt/test.yml index d26a1e1c5..95932d249 100644 --- a/test/cases/040_packages/002_binfmt/test.yml +++ b/test/cases/040_packages/002_binfmt/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/002_bpftrace/test.yml b/test/cases/040_packages/002_bpftrace/test.yml index 96e98f2f7..8561e43c5 100644 --- a/test/cases/040_packages/002_bpftrace/test.yml +++ b/test/cases/040_packages/002_bpftrace/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/003_ca-certificates/test.yml b/test/cases/040_packages/003_ca-certificates/test.yml index a681e1733..b7fc5a4f8 100644 --- a/test/cases/040_packages/003_ca-certificates/test.yml +++ b/test/cases/040_packages/003_ca-certificates/test.yml @@ -1,5 +1,5 @@ kernel: - image: 
linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/003_containerd/test.yml b/test/cases/040_packages/003_containerd/test.yml index 7abc2485e..284567db3 100644 --- a/test/cases/040_packages/003_containerd/test.yml +++ b/test/cases/040_packages/003_containerd/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/004_dhcpcd/test.yml b/test/cases/040_packages/004_dhcpcd/test.yml index 575773bf1..50cba310c 100644 --- a/test/cases/040_packages/004_dhcpcd/test.yml +++ b/test/cases/040_packages/004_dhcpcd/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/004_dm-crypt/000_simple/test.yml b/test/cases/040_packages/004_dm-crypt/000_simple/test.yml index fba946366..49cb33516 100644 --- a/test/cases/040_packages/004_dm-crypt/000_simple/test.yml +++ b/test/cases/040_packages/004_dm-crypt/000_simple/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/004_dm-crypt/001_luks/test.yml b/test/cases/040_packages/004_dm-crypt/001_luks/test.yml index 739de3ed9..d83d5a468 100644 --- a/test/cases/040_packages/004_dm-crypt/001_luks/test.yml +++ b/test/cases/040_packages/004_dm-crypt/001_luks/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/004_dm-crypt/002_key/test.yml b/test/cases/040_packages/004_dm-crypt/002_key/test.yml index d0c22542c..f0cdaabbf 100644 --- a/test/cases/040_packages/004_dm-crypt/002_key/test.yml +++ b/test/cases/040_packages/004_dm-crypt/002_key/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/005_extend/000_ext4/test-create.yml b/test/cases/040_packages/005_extend/000_ext4/test-create.yml index 2d44ec743..2fdb7e8cd 100644 --- a/test/cases/040_packages/005_extend/000_ext4/test-create.yml +++ b/test/cases/040_packages/005_extend/000_ext4/test-create.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/005_extend/000_ext4/test.yml b/test/cases/040_packages/005_extend/000_ext4/test.yml index a3fe58fb0..fe3d4fd54 100644 --- a/test/cases/040_packages/005_extend/000_ext4/test.yml +++ b/test/cases/040_packages/005_extend/000_ext4/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/005_extend/001_btrfs/test-create.yml b/test/cases/040_packages/005_extend/001_btrfs/test-create.yml 
index e175bfb41..c7dfd94f5 100644 --- a/test/cases/040_packages/005_extend/001_btrfs/test-create.yml +++ b/test/cases/040_packages/005_extend/001_btrfs/test-create.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/005_extend/001_btrfs/test.yml b/test/cases/040_packages/005_extend/001_btrfs/test.yml index 859ff9298..9b25fab95 100644 --- a/test/cases/040_packages/005_extend/001_btrfs/test.yml +++ b/test/cases/040_packages/005_extend/001_btrfs/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/005_extend/002_xfs/test-create.yml b/test/cases/040_packages/005_extend/002_xfs/test-create.yml index 869340f89..8f79ac1f9 100644 --- a/test/cases/040_packages/005_extend/002_xfs/test-create.yml +++ b/test/cases/040_packages/005_extend/002_xfs/test-create.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/005_extend/002_xfs/test.yml b/test/cases/040_packages/005_extend/002_xfs/test.yml index 4a62a675e..20b79c6eb 100644 --- a/test/cases/040_packages/005_extend/002_xfs/test.yml +++ b/test/cases/040_packages/005_extend/002_xfs/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/006_format_mount/000_auto/test.yml b/test/cases/040_packages/006_format_mount/000_auto/test.yml index 6085c6cb9..981b035b3 100644 --- a/test/cases/040_packages/006_format_mount/000_auto/test.yml +++ b/test/cases/040_packages/006_format_mount/000_auto/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/006_format_mount/001_by_label/test.yml b/test/cases/040_packages/006_format_mount/001_by_label/test.yml index 3d3f12587..3e23dd468 100644 --- a/test/cases/040_packages/006_format_mount/001_by_label/test.yml +++ b/test/cases/040_packages/006_format_mount/001_by_label/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/006_format_mount/002_by_name/test.yml.in b/test/cases/040_packages/006_format_mount/002_by_name/test.yml.in index 99dbfdc8c..3cae3097c 100644 --- a/test/cases/040_packages/006_format_mount/002_by_name/test.yml.in +++ b/test/cases/040_packages/006_format_mount/002_by_name/test.yml.in @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/006_format_mount/003_btrfs/test.yml b/test/cases/040_packages/006_format_mount/003_btrfs/test.yml index 148589e3b..e06010075 100644 --- a/test/cases/040_packages/006_format_mount/003_btrfs/test.yml +++ 
b/test/cases/040_packages/006_format_mount/003_btrfs/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/006_format_mount/004_xfs/test.yml b/test/cases/040_packages/006_format_mount/004_xfs/test.yml index 032595c11..72e7dfe89 100644 --- a/test/cases/040_packages/006_format_mount/004_xfs/test.yml +++ b/test/cases/040_packages/006_format_mount/004_xfs/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/006_format_mount/005_by_device_force/test.yml b/test/cases/040_packages/006_format_mount/005_by_device_force/test.yml index 7bfcb76aa..80f3cb676 100644 --- a/test/cases/040_packages/006_format_mount/005_by_device_force/test.yml +++ b/test/cases/040_packages/006_format_mount/005_by_device_force/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/006_format_mount/010_multiple/test.yml b/test/cases/040_packages/006_format_mount/010_multiple/test.yml index dd1249fc8..40e850f4e 100644 --- a/test/cases/040_packages/006_format_mount/010_multiple/test.yml +++ b/test/cases/040_packages/006_format_mount/010_multiple/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/007_getty-containerd/test.yml b/test/cases/040_packages/007_getty-containerd/test.yml index 7e13053c0..984ed56aa 100644 --- a/test/cases/040_packages/007_getty-containerd/test.yml +++ b/test/cases/040_packages/007_getty-containerd/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/012_losetup/test.yml b/test/cases/040_packages/012_losetup/test.yml index 08fc943c2..46327c5ad 100644 --- a/test/cases/040_packages/012_losetup/test.yml +++ b/test/cases/040_packages/012_losetup/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/013_mkimage/mkimage.yml b/test/cases/040_packages/013_mkimage/mkimage.yml index ef271cbd1..b58014f7a 100644 --- a/test/cases/040_packages/013_mkimage/mkimage.yml +++ b/test/cases/040_packages/013_mkimage/mkimage.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/013_mkimage/run.yml b/test/cases/040_packages/013_mkimage/run.yml index d29c50c43..d5a739d70 100644 --- a/test/cases/040_packages/013_mkimage/run.yml +++ b/test/cases/040_packages/013_mkimage/run.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 
diff --git a/test/cases/040_packages/019_sysctl/test.yml b/test/cases/040_packages/019_sysctl/test.yml index 9a2726842..49b835408 100644 --- a/test/cases/040_packages/019_sysctl/test.yml +++ b/test/cases/040_packages/019_sysctl/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/023_wireguard/test.yml b/test/cases/040_packages/023_wireguard/test.yml index 9d1832fbf..e6089b55c 100644 --- a/test/cases/040_packages/023_wireguard/test.yml +++ b/test/cases/040_packages/023_wireguard/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/030_logwrite/test.yml b/test/cases/040_packages/030_logwrite/test.yml index 52c20d2c4..c858b1b76 100644 --- a/test/cases/040_packages/030_logwrite/test.yml +++ b/test/cases/040_packages/030_logwrite/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/031_kmsg/test.yml b/test/cases/040_packages/031_kmsg/test.yml index 51cfccbe5..abca9f84a 100644 --- a/test/cases/040_packages/031_kmsg/test.yml +++ b/test/cases/040_packages/031_kmsg/test.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/cases/040_packages/032_bcc/test.yml b/test/cases/040_packages/032_bcc/test.yml index ede9ff310..3ade12ba0 100644 --- a/test/cases/040_packages/032_bcc/test.yml +++ b/test/cases/040_packages/032_bcc/test.yml @@ -1,10 +1,10 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0 console=ttyAMA0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 - linuxkit/runc:69b4a35eaa22eba4990ee52cccc8f48f6c08ed03 - - linuxkit/kernel-bcc:5.4.28 + - linuxkit/kernel-bcc:5.4.30 onboot: - name: check-bcc image: alpine:3.9 diff --git a/test/hack/test-ltp.yml b/test/hack/test-ltp.yml index 78f30ca1a..4fbb0337d 100644 --- a/test/hack/test-ltp.yml +++ b/test/hack/test-ltp.yml @@ -1,5 +1,5 @@ kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/hack/test.yml b/test/hack/test.yml index ec404b065..9af6058d9 100644 --- a/test/hack/test.yml +++ b/test/hack/test.yml @@ -1,7 +1,7 @@ # FIXME: This should use the minimal example # We continue to use the kernel-config-test as CI is currently expecting to see a success message kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 diff --git a/test/pkg/ns/template.yml b/test/pkg/ns/template.yml index 4718ba893..c5e18ee74 100644 --- a/test/pkg/ns/template.yml +++ b/test/pkg/ns/template.yml @@ -1,6 +1,6 @@ # Sample YAML file for manual testing kernel: - image: linuxkit/kernel:5.4.28 + image: linuxkit/kernel:5.4.30 cmdline: "console=ttyS0" init: - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691 From 673303575d269a954086467d8c85ee7a4cb628ad Mon Sep 17 00:00:00 2001 From: Rolf 
Neugebauer
Date: Fri, 10 Apr 2020 18:13:03 +0100
Subject: [PATCH 8/8] test: Add kernel tests for 5.6.x

Signed-off-by: Rolf Neugebauer
---
 .../cases/020_kernel/012_config_5.6.x/test.sh | 24 ++++++++++++++
 .../020_kernel/012_config_5.6.x/test.yml      | 15 +++++++++
 .../020_kernel/112_kmod_5.6.x/Dockerfile      | 23 ++++++++++++++
 test/cases/020_kernel/112_kmod_5.6.x/check.sh | 15 +++++++++
 .../020_kernel/112_kmod_5.6.x/src/Makefile    |  6 ++++
 .../112_kmod_5.6.x/src/hello_world.c          | 22 +++++++++++++
 test/cases/020_kernel/112_kmod_5.6.x/test.sh  | 31 +++++++++++++++++++
 test/cases/020_kernel/112_kmod_5.6.x/test.yml | 20 ++++++++++++
 8 files changed, 156 insertions(+)
 create mode 100644 test/cases/020_kernel/012_config_5.6.x/test.sh
 create mode 100644 test/cases/020_kernel/012_config_5.6.x/test.yml
 create mode 100644 test/cases/020_kernel/112_kmod_5.6.x/Dockerfile
 create mode 100755 test/cases/020_kernel/112_kmod_5.6.x/check.sh
 create mode 100644 test/cases/020_kernel/112_kmod_5.6.x/src/Makefile
 create mode 100644 test/cases/020_kernel/112_kmod_5.6.x/src/hello_world.c
 create mode 100644 test/cases/020_kernel/112_kmod_5.6.x/test.sh
 create mode 100644 test/cases/020_kernel/112_kmod_5.6.x/test.yml

diff --git a/test/cases/020_kernel/012_config_5.6.x/test.sh b/test/cases/020_kernel/012_config_5.6.x/test.sh
new file mode 100644
index 000000000..fdfccb99c
--- /dev/null
+++ b/test/cases/020_kernel/012_config_5.6.x/test.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+# SUMMARY: Sanity check on the kernel config file
+# LABELS:
+# REPEAT:
+
+set -e
+
+# Source libraries. Uncomment if needed/defined
+#. "${RT_LIB}"
+. "${RT_PROJECT_ROOT}/_lib/lib.sh"
+
+NAME=kconfig
+
+clean_up() {
+    rm -rf ${NAME}-*
+}
+trap clean_up EXIT
+
+# Test code goes here
+linuxkit build -format kernel+initrd -name "${NAME}" test.yml
+RESULT="$(linuxkit run ${NAME})"
+echo "${RESULT}" | grep -q "suite PASSED"
+
+exit 0
diff --git a/test/cases/020_kernel/012_config_5.6.x/test.yml b/test/cases/020_kernel/012_config_5.6.x/test.yml
new file mode 100644
index 000000000..30319c9cc
--- /dev/null
+++ b/test/cases/020_kernel/012_config_5.6.x/test.yml
@@ -0,0 +1,15 @@
+kernel:
+  image: linuxkit/kernel:5.6.2
+  cmdline: "console=ttyS0 console=ttyAMA0"
+init:
+  - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691
+  - linuxkit/runc:69b4a35eaa22eba4990ee52cccc8f48f6c08ed03
+onboot:
+  - name: check-kernel-config
+    image: linuxkit/test-kernel-config:94fdeb494e09200fc05b6da39822aabfaca234e4
+  - name: poweroff
+    image: linuxkit/poweroff:b498d30dd9660090565537fceb9e757618737a85
+    command: ["/bin/sh", "/poweroff.sh", "3"]
+trust:
+  org:
+    - linuxkit
diff --git a/test/cases/020_kernel/112_kmod_5.6.x/Dockerfile b/test/cases/020_kernel/112_kmod_5.6.x/Dockerfile
new file mode 100644
index 000000000..483762da6
--- /dev/null
+++ b/test/cases/020_kernel/112_kmod_5.6.x/Dockerfile
@@ -0,0 +1,23 @@
+# This Dockerfile extracts the kernel headers from the kernel image
+# and then compiles a simple hello world kernel module against them.
+# In the last stage, it creates a package, which can be used for
+# testing.
+
+FROM linuxkit/kernel:5.6.2 AS ksrc
+
+# Extract headers and compile module
+FROM linuxkit/alpine:3fdc49366257e53276c6f363956a4353f95d9a81 AS build
+RUN apk add build-base elfutils-dev
+
+COPY --from=ksrc /kernel-dev.tar /
+RUN tar xf kernel-dev.tar
+
+WORKDIR /kmod
+COPY ./src/* ./
+RUN make all
+
+# Package
+FROM alpine:3.9
+COPY --from=build /kmod/hello_world.ko /
+COPY check.sh /check.sh
+ENTRYPOINT ["/bin/sh", "/check.sh"]
diff --git a/test/cases/020_kernel/112_kmod_5.6.x/check.sh b/test/cases/020_kernel/112_kmod_5.6.x/check.sh
new file mode 100755
index 000000000..02e491624
--- /dev/null
+++ b/test/cases/020_kernel/112_kmod_5.6.x/check.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+function failed {
+    printf "Kernel module test suite FAILED\n"
+    /sbin/poweroff -f
+}
+
+uname -a
+modinfo hello_world.ko || failed
+insmod hello_world.ko || failed
+[ -n "$(dmesg | grep -o 'Hello LinuxKit')" ] || failed
+rmmod hello_world || failed
+
+printf "Kernel module test suite PASSED\n"
+
+/sbin/poweroff -f
diff --git a/test/cases/020_kernel/112_kmod_5.6.x/src/Makefile b/test/cases/020_kernel/112_kmod_5.6.x/src/Makefile
new file mode 100644
index 000000000..31c8215dd
--- /dev/null
+++ b/test/cases/020_kernel/112_kmod_5.6.x/src/Makefile
@@ -0,0 +1,6 @@
+obj-m += hello_world.o
+KVER=$(shell basename /usr/src/linux-headers-*)
+all:
+	make -C /usr/src/$(KVER) M=$(PWD) modules
+clean:
+	make -C /usr/src/$(KVER) M=$(PWD) clean
diff --git a/test/cases/020_kernel/112_kmod_5.6.x/src/hello_world.c b/test/cases/020_kernel/112_kmod_5.6.x/src/hello_world.c
new file mode 100644
index 000000000..7dd6d3ee2
--- /dev/null
+++ b/test/cases/020_kernel/112_kmod_5.6.x/src/hello_world.c
@@ -0,0 +1,22 @@
+/*
+ * A simple Hello World kernel module
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+int init_hello(void)
+{
+	printk(KERN_INFO "Hello LinuxKit\n");
+	return 0;
+}
+
+void exit_hello(void)
+{
+	printk(KERN_INFO "Goodbye LinuxKit.\n");
+}
+
+module_init(init_hello);
+module_exit(exit_hello);
+MODULE_AUTHOR("Rolf Neugebauer ");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("A simple Hello World kernel module for testing");
diff --git a/test/cases/020_kernel/112_kmod_5.6.x/test.sh b/test/cases/020_kernel/112_kmod_5.6.x/test.sh
new file mode 100644
index 000000000..c9788a75b
--- /dev/null
+++ b/test/cases/020_kernel/112_kmod_5.6.x/test.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+# SUMMARY: Test build and insertion of kernel modules
+# LABELS:
+# REPEAT:
+
+set -e
+
+# Source libraries. Uncomment if needed/defined
+#. "${RT_LIB}"
+. "${RT_PROJECT_ROOT}/_lib/lib.sh"
+
+NAME=kmod
+IMAGE_NAME=kmod-test
+
+clean_up() {
+    docker rmi ${IMAGE_NAME} || true
+    rm -rf ${NAME}-*
+}
+trap clean_up EXIT
+
+# Make sure we have the latest kernel image
+docker pull linuxkit/kernel:5.6.2
+# Build a package
+docker build -t ${IMAGE_NAME} .
+
+# Build and run a LinuxKit image with kernel module (and test script)
+linuxkit build -format kernel+initrd -name "${NAME}" test.yml
+RESULT="$(linuxkit run ${NAME})"
+echo "${RESULT}" | grep -q "Hello LinuxKit"
+
+exit 0
diff --git a/test/cases/020_kernel/112_kmod_5.6.x/test.yml b/test/cases/020_kernel/112_kmod_5.6.x/test.yml
new file mode 100644
index 000000000..6b2f7e59c
--- /dev/null
+++ b/test/cases/020_kernel/112_kmod_5.6.x/test.yml
@@ -0,0 +1,20 @@
+kernel:
+  image: linuxkit/kernel:5.6.2
+  cmdline: "console=ttyS0 console=ttyAMA0"
+init:
+  - linuxkit/init:a0246dd478a24abbee0a4cede99662ffc4931691
+  - linuxkit/runc:69b4a35eaa22eba4990ee52cccc8f48f6c08ed03
+onboot:
+  - name: check
+    image: kmod-test
+    binds:
+      - /dev:/dev
+      - /lib/modules:/lib/modules
+    capabilities:
+      - all
+  - name: poweroff
+    image: linuxkit/poweroff:b498d30dd9660090565537fceb9e757618737a85
+    command: ["/bin/sh", "/poweroff.sh", "3"]
+trust:
+  org:
+    - linuxkit