-rw-r--r--  mk/kernel-ver.mk  4
-rw-r--r--  target/avr32/grasshopper/patches/4.8.6/0001-grasshopper.patch (renamed from target/avr32/grasshopper/patches/4.8.5/0001-grasshopper.patch)  0
-rw-r--r--  target/avr32/grasshopper/patches/4.8.6/0002-grasshopper-led-pwm.patch (renamed from target/avr32/grasshopper/patches/4.8.5/0002-grasshopper-led-pwm.patch)  0
-rw-r--r--  target/avr32/grasshopper/patches/4.8.6/0003-grasshopper-mmc.patch (renamed from target/avr32/grasshopper/patches/4.8.5/0003-grasshopper-mmc.patch)  0
-rw-r--r--  target/avr32/grasshopper/patches/4.8.6/0004-grasshopper-i2c.patch (renamed from target/avr32/grasshopper/patches/4.8.5/0004-grasshopper-i2c.patch)  0
-rw-r--r--  target/avr32/grasshopper/patches/4.8.6/0005-grasshopper-usart.patch (renamed from target/avr32/grasshopper/patches/4.8.5/0005-grasshopper-usart.patch)  0
-rw-r--r--  target/avr32/grasshopper/patches/4.8.6/0100-netfilter-init-fix.patch (renamed from target/avr32/grasshopper/patches/4.8.5/0100-netfilter-init-fix.patch)  0
-rw-r--r--  target/config/Config.in.kernelversion  2
-rw-r--r--  target/linux/config/Config.in.kernel  6
-rw-r--r--  target/linux/patches/4.1.35/patch-realtime  8564
-rw-r--r--  target/linux/patches/4.8.6/crisv32.patch (renamed from target/linux/patches/4.8.5/crisv32.patch)  0
-rw-r--r--  target/linux/patches/4.8.6/crisv32_ethernet_driver.patch (renamed from target/linux/patches/4.8.5/crisv32_ethernet_driver.patch)  0
-rw-r--r--  target/linux/patches/4.8.6/initramfs-nosizelimit.patch (renamed from target/linux/patches/4.8.5/initramfs-nosizelimit.patch)  0
-rw-r--r--  target/linux/patches/4.8.6/ld-or1k.patch (renamed from target/linux/patches/4.8.5/ld-or1k.patch)  0
-rw-r--r--  target/linux/patches/4.8.6/macsonic.patch (renamed from target/linux/patches/4.8.5/macsonic.patch)  0
-rw-r--r--  target/linux/patches/4.8.6/patch-realtime  25908
-rw-r--r--  target/linux/patches/4.8.6/startup.patch (renamed from target/linux/patches/4.8.5/startup.patch)  0
-rw-r--r--  target/linux/patches/4.8.6/vdso2.patch (renamed from target/linux/patches/4.8.5/vdso2.patch)  0
18 files changed, 30940 insertions, 3544 deletions
diff --git a/mk/kernel-ver.mk b/mk/kernel-ver.mk
index 9c60855ff..277d6acb0 100644
--- a/mk/kernel-ver.mk
+++ b/mk/kernel-ver.mk
@@ -20,10 +20,10 @@ KERNEL_RELEASE:= 1
KERNEL_VERSION:= $(KERNEL_FILE_VER)-$(KERNEL_RELEASE)
endif
ifeq ($(ADK_TARGET_KERNEL_VERSION_4_8),y)
-KERNEL_FILE_VER:= 4.8.5
+KERNEL_FILE_VER:= 4.8.6
KERNEL_RELEASE:= 1
KERNEL_VERSION:= $(KERNEL_FILE_VER)-$(KERNEL_RELEASE)
-KERNEL_HASH:= 48171714923f52b03e667011426cb62089fd3df9eb4be1227b781188d680c408
+KERNEL_HASH:= 74744e00420856cfc8049fa3b3a55e57a116994226a498ef56801bc9492df36b
endif
ifeq ($(ADK_TARGET_KERNEL_VERSION_4_4),y)
KERNEL_FILE_VER:= 4.4.28
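The new KERNEL_HASH above is the SHA-256 checksum of the 4.8.6 kernel tarball. A minimal sketch of verifying the bump by hand, assuming the usual kernel.org mirror layout and the linux-4.8.6.tar.xz filename (both are assumptions, not taken from this diff; the build system performs this check itself):

  # Fetch the tarball and compare it against the KERNEL_HASH recorded
  # in mk/kernel-ver.mk (two spaces are required in sha256sum -c input).
  wget https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.8.6.tar.xz
  echo "74744e00420856cfc8049fa3b3a55e57a116994226a498ef56801bc9492df36b  linux-4.8.6.tar.xz" | sha256sum -c -
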
diff --git a/target/avr32/grasshopper/patches/4.8.5/0001-grasshopper.patch b/target/avr32/grasshopper/patches/4.8.6/0001-grasshopper.patch
index 0fa4535c2..0fa4535c2 100644
--- a/target/avr32/grasshopper/patches/4.8.5/0001-grasshopper.patch
+++ b/target/avr32/grasshopper/patches/4.8.6/0001-grasshopper.patch
diff --git a/target/avr32/grasshopper/patches/4.8.5/0002-grasshopper-led-pwm.patch b/target/avr32/grasshopper/patches/4.8.6/0002-grasshopper-led-pwm.patch
index 977c91523..977c91523 100644
--- a/target/avr32/grasshopper/patches/4.8.5/0002-grasshopper-led-pwm.patch
+++ b/target/avr32/grasshopper/patches/4.8.6/0002-grasshopper-led-pwm.patch
diff --git a/target/avr32/grasshopper/patches/4.8.5/0003-grasshopper-mmc.patch b/target/avr32/grasshopper/patches/4.8.6/0003-grasshopper-mmc.patch
index f87d4731b..f87d4731b 100644
--- a/target/avr32/grasshopper/patches/4.8.5/0003-grasshopper-mmc.patch
+++ b/target/avr32/grasshopper/patches/4.8.6/0003-grasshopper-mmc.patch
diff --git a/target/avr32/grasshopper/patches/4.8.5/0004-grasshopper-i2c.patch b/target/avr32/grasshopper/patches/4.8.6/0004-grasshopper-i2c.patch
index b0796ed5a..b0796ed5a 100644
--- a/target/avr32/grasshopper/patches/4.8.5/0004-grasshopper-i2c.patch
+++ b/target/avr32/grasshopper/patches/4.8.6/0004-grasshopper-i2c.patch
diff --git a/target/avr32/grasshopper/patches/4.8.5/0005-grasshopper-usart.patch b/target/avr32/grasshopper/patches/4.8.6/0005-grasshopper-usart.patch
index 448647c29..448647c29 100644
--- a/target/avr32/grasshopper/patches/4.8.5/0005-grasshopper-usart.patch
+++ b/target/avr32/grasshopper/patches/4.8.6/0005-grasshopper-usart.patch
diff --git a/target/avr32/grasshopper/patches/4.8.5/0100-netfilter-init-fix.patch b/target/avr32/grasshopper/patches/4.8.6/0100-netfilter-init-fix.patch
index 9d80e8b53..9d80e8b53 100644
--- a/target/avr32/grasshopper/patches/4.8.5/0100-netfilter-init-fix.patch
+++ b/target/avr32/grasshopper/patches/4.8.6/0100-netfilter-init-fix.patch
diff --git a/target/config/Config.in.kernelversion b/target/config/Config.in.kernelversion
index 95e03897b..9a35ad682 100644
--- a/target/config/Config.in.kernelversion
+++ b/target/config/Config.in.kernelversion
@@ -34,7 +34,7 @@ config ADK_TARGET_KERNEL_VERSION_BB
depends on ADK_TARGET_SYSTEM_BEAGLEBONE_BLACK
config ADK_TARGET_KERNEL_VERSION_4_8
- bool "4.8.5"
+ bool "4.8.6"
depends on !ADK_TARGET_ARCH_METAG
depends on !ADK_TARGET_ARCH_NDS32
depends on !ADK_TARGET_ARCH_SPARC
diff --git a/target/linux/config/Config.in.kernel b/target/linux/config/Config.in.kernel
index 393b1628b..ad0e01819 100644
--- a/target/linux/config/Config.in.kernel
+++ b/target/linux/config/Config.in.kernel
@@ -146,7 +146,8 @@ config ADK_KERNEL_PREEMPT_RTB
select ADK_KERNEL_PATCH_REALTIME
depends on !ADK_KERNEL_OPROFILE
depends on ADK_TARGET_KERNEL_VERSION_4_1 \
- || ADK_TARGET_KERNEL_VERSION_4_4
+ || ADK_TARGET_KERNEL_VERSION_4_4 \
+ || ADK_TARGET_KERNEL_VERSION_4_8
help
Preemptible Kernel (Basic RT)
@@ -156,7 +157,8 @@ config ADK_KERNEL_PREEMPT_RT_FULL
select ADK_KERNEL_PATCH_REALTIME
depends on !ADK_KERNEL_OPROFILE
depends on ADK_TARGET_KERNEL_VERSION_4_1 \
- || ADK_TARGET_KERNEL_VERSION_4_4
+ || ADK_TARGET_KERNEL_VERSION_4_4 \
+ || ADK_TARGET_KERNEL_VERSION_4_8
help
Fully Preemptible Kernel (RealTime)
https://www.kernel.org/pub/linux/kernel/projects/rt/
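With 4.8 added to the two dependency lists above, the RT preemption models become selectable on 4.8.x kernels; both options select ADK_KERNEL_PATCH_REALTIME, which is presumably what pulls in the per-version patch-realtime files touched below (the 4.1.35 one is regenerated in git format, and a new one is added for 4.8.6). A minimal sketch of applying such a patch to a vanilla tree by hand (the paths and manual invocation are illustrative assumptions; the build system normally applies it automatically):

  # Hypothetical manual application of the RT patch to a pristine tree.
  tar xf linux-4.8.6.tar.xz && cd linux-4.8.6
  patch -p1 < ../target/linux/patches/4.8.6/patch-realtime
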
diff --git a/target/linux/patches/4.1.35/patch-realtime b/target/linux/patches/4.1.35/patch-realtime
index 6ecf019b9..94d6fe355 100644
--- a/target/linux/patches/4.1.35/patch-realtime
+++ b/target/linux/patches/4.1.35/patch-realtime
@@ -1,6 +1,313 @@
-diff -Nur linux-4.1.26.orig/arch/alpha/mm/fault.c linux-4.1.26/arch/alpha/mm/fault.c
---- linux-4.1.26.orig/arch/alpha/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/alpha/mm/fault.c 2016-06-19 15:30:54.915151887 +0200
+diff --git a/Documentation/hwlat_detector.txt b/Documentation/hwlat_detector.txt
+new file mode 100644
+index 000000000000..cb61516483d3
+--- /dev/null
++++ b/Documentation/hwlat_detector.txt
+@@ -0,0 +1,64 @@
++Introduction:
++-------------
++
++The module hwlat_detector is a special purpose kernel module that is used to
++detect large system latencies induced by the behavior of certain underlying
++hardware or firmware, independent of Linux itself. The code was developed
++originally to detect SMIs (System Management Interrupts) on x86 systems,
++however there is nothing x86 specific about this patchset. It was
++originally written for use by the "RT" patch since the Real Time
++kernel is highly latency sensitive.
++
++SMIs are usually not serviced by the Linux kernel, which typically does not
++even know that they are occuring. SMIs are instead are set up by BIOS code
++and are serviced by BIOS code, usually for "critical" events such as
++management of thermal sensors and fans. Sometimes though, SMIs are used for
++other tasks and those tasks can spend an inordinate amount of time in the
++handler (sometimes measured in milliseconds). Obviously this is a problem if
++you are trying to keep event service latencies down in the microsecond range.
++
++The hardware latency detector works by hogging all of the cpus for configurable
++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
++for some period, then looking for gaps in the TSC data. Any gap indicates a
++time when the polling was interrupted and since the machine is stopped and
++interrupts turned off the only thing that could do that would be an SMI.
++
++Note that the SMI detector should *NEVER* be used in a production environment.
++It is intended to be run manually to determine if the hardware platform has a
++problem with long system firmware service routines.
++
++Usage:
++------
++
++Loading the module hwlat_detector passing the parameter "enabled=1" (or by
++setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only
++step required to start the hwlat_detector. It is possible to redefine the
++threshold in microseconds (us) above which latency spikes will be taken
++into account (parameter "threshold=").
++
++Example:
++
++ # modprobe hwlat_detector enabled=1 threshold=100
++
++After the module is loaded, it creates a directory named "hwlat_detector" under
++the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary
++to have debugfs mounted, which might be on /sys/debug on your system.
++
++The /debug/hwlat_detector interface contains the following files:
++
++count - number of latency spikes observed since last reset
++enable - a global enable/disable toggle (0/1), resets count
++max - maximum hardware latency actually observed (usecs)
++sample - a pipe from which to read current raw sample data
++ in the format <timestamp> <latency observed usecs>
++ (can be opened O_NONBLOCK for a single sample)
++threshold - minimum latency value to be considered (usecs)
++width - time period to sample with CPUs held (usecs)
++ must be less than the total window size (enforced)
++window - total period of sampling, width being inside (usecs)
++
++By default we will set width to 500,000 and window to 1,000,000, meaning that
++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
++observe any latencies that exceed the threshold (initially 100 usecs),
++then we write to a global sample ring buffer of 8K samples, which is
++consumed by reading from the "sample" (pipe) debugfs file interface.
+diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
+index 0e307c94809a..6964d0f80ae7 100644
+--- a/Documentation/sysrq.txt
++++ b/Documentation/sysrq.txt
+@@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
+ On other - If you know of the key combos for other architectures, please
+ let me know so I can add them to this section.
+
+-On all - write a character to /proc/sysrq-trigger. e.g.:
+-
++On all - write a character to /proc/sysrq-trigger, e.g.:
+ echo t > /proc/sysrq-trigger
+
++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
++ Send an ICMP echo request with this pattern plus the particular
++ SysRq command key. Example:
++ # ping -c1 -s57 -p0102030468
++ will trigger the SysRq-H (help) command.
++
++
+ * What are the 'command' keys?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'b' - Will immediately reboot the system without syncing or unmounting
+diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt
+new file mode 100644
+index 000000000000..6f2aeabf7faa
+--- /dev/null
++++ b/Documentation/trace/histograms.txt
+@@ -0,0 +1,186 @@
++ Using the Linux Kernel Latency Histograms
++
++
++This document gives a short explanation how to enable, configure and use
++latency histograms. Latency histograms are primarily relevant in the
++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
++and are used in the quality management of the Linux real-time
++capabilities.
++
++
++* Purpose of latency histograms
++
++A latency histogram continuously accumulates the frequencies of latency
++data. There are two types of histograms
++- potential sources of latencies
++- effective latencies
++
++
++* Potential sources of latencies
++
++Potential sources of latencies are code segments where interrupts,
++preemption or both are disabled (aka critical sections). To create
++histograms of potential sources of latency, the kernel stores the time
++stamp at the start of a critical section, determines the time elapsed
++when the end of the section is reached, and increments the frequency
++counter of that latency value - irrespective of whether any concurrently
++running process is affected by latency or not.
++- Configuration items (in the Kernel hacking/Tracers submenu)
++ CONFIG_INTERRUPT_OFF_LATENCY
++ CONFIG_PREEMPT_OFF_LATENCY
++
++
++* Effective latencies
++
++Effective latencies are actually occuring during wakeup of a process. To
++determine effective latencies, the kernel stores the time stamp when a
++process is scheduled to be woken up, and determines the duration of the
++wakeup time shortly before control is passed over to this process. Note
++that the apparent latency in user space may be somewhat longer, since the
++process may be interrupted after control is passed over to it but before
++the execution in user space takes place. Simply measuring the interval
++between enqueuing and wakeup may also not appropriate in cases when a
++process is scheduled as a result of a timer expiration. The timer may have
++missed its deadline, e.g. due to disabled interrupts, but this latency
++would not be registered. Therefore, the offsets of missed timers are
++recorded in a separate histogram. If both wakeup latency and missed timer
++offsets are configured and enabled, a third histogram may be enabled that
++records the overall latency as a sum of the timer latency, if any, and the
++wakeup latency. This histogram is called "timerandwakeup".
++- Configuration items (in the Kernel hacking/Tracers submenu)
++ CONFIG_WAKEUP_LATENCY
++ CONFIG_MISSED_TIMER_OFSETS
++
++
++* Usage
++
++The interface to the administration of the latency histograms is located
++in the debugfs file system. To mount it, either enter
++
++mount -t sysfs nodev /sys
++mount -t debugfs nodev /sys/kernel/debug
++
++from shell command line level, or add
++
++nodev /sys sysfs defaults 0 0
++nodev /sys/kernel/debug debugfs defaults 0 0
++
++to the file /etc/fstab. All latency histogram related files are then
++available in the directory /sys/kernel/debug/tracing/latency_hist. A
++particular histogram type is enabled by writing non-zero to the related
++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
++Select "preemptirqsoff" for the histograms of potential sources of
++latencies and "wakeup" for histograms of effective latencies etc. The
++histogram data - one per CPU - are available in the files
++
++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
++
++The histograms are reset by writing non-zero to the file "reset" in a
++particular latency directory. To reset all latency data, use
++
++#!/bin/sh
++
++TRACINGDIR=/sys/kernel/debug/tracing
++HISTDIR=$TRACINGDIR/latency_hist
++
++if test -d $HISTDIR
++then
++ cd $HISTDIR
++ for i in `find . | grep /reset$`
++ do
++ echo 1 >$i
++ done
++fi
++
++
++* Data format
++
++Latency data are stored with a resolution of one microsecond. The
++maximum latency is 10,240 microseconds. The data are only valid, if the
++overflow register is empty. Every output line contains the latency in
++microseconds in the first row and the number of samples in the second
++row. To display only lines with a positive latency count, use, for
++example,
++
++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
++
++#Minimum latency: 0 microseconds.
++#Average latency: 0 microseconds.
++#Maximum latency: 25 microseconds.
++#Total samples: 3104770694
++#There are 0 samples greater or equal than 10240 microseconds
++#usecs samples
++ 0 2984486876
++ 1 49843506
++ 2 58219047
++ 3 5348126
++ 4 2187960
++ 5 3388262
++ 6 959289
++ 7 208294
++ 8 40420
++ 9 4485
++ 10 14918
++ 11 18340
++ 12 25052
++ 13 19455
++ 14 5602
++ 15 969
++ 16 47
++ 17 18
++ 18 14
++ 19 1
++ 20 3
++ 21 2
++ 22 5
++ 23 2
++ 25 1
++
++
++* Wakeup latency of a selected process
++
++To only collect wakeup latency data of a particular process, write the
++PID of the requested process to
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/pid
++
++PIDs are not considered, if this variable is set to 0.
++
++
++* Details of the process with the highest wakeup latency so far
++
++Selected data of the process that suffered from the highest wakeup
++latency that occurred in a particular CPU are available in the file
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
++
++In addition, other relevant system data at the time when the
++latency occurred are given.
++
++The format of the data is (all in one line):
++<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
++<- <PID> <Priority> <Command> <Timestamp>
++
++The value of <Timeroffset> is only relevant in the combined timer
++and wakeup latency recording. In the wakeup recording, it is
++always 0, in the missed_timer_offsets recording, it is the same
++as <Latency>.
++
++When retrospectively searching for the origin of a latency and
++tracing was not enabled, it may be helpful to know the name and
++some basic data of the task that (finally) was switching to the
++late real-tlme task. In addition to the victim's data, also the
++data of the possible culprit are therefore displayed after the
++"<-" symbol.
++
++Finally, the timestamp of the time when the latency occurred
++in <seconds>.<microseconds> after the most recent system boot
++is provided.
++
++These data are also reset when the wakeup histogram is reset.
+diff --git a/arch/Kconfig b/arch/Kconfig
+index a65eafb24997..78d3ed24484a 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -6,6 +6,7 @@ config OPROFILE
+ tristate "OProfile system profiling"
+ depends on PROFILING
+ depends on HAVE_OPROFILE
++ depends on !PREEMPT_RT_FULL
+ select RING_BUFFER
+ select RING_BUFFER_ALLOW_SWAP
+ help
+@@ -49,6 +50,7 @@ config KPROBES
+ config JUMP_LABEL
+ bool "Optimize very unlikely/likely branches"
+ depends on HAVE_ARCH_JUMP_LABEL
++ depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
+ help
+ This option enables a transparent branch optimization that
+ makes certain almost-always-true or almost-always-false branch
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 9d0ac091a52a..4a905bd667e2 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
@@ -23,8 +23,7 @@
#include <linux/smp.h>
#include <linux/interrupt.h>
@@ -11,7 +318,7 @@ diff -Nur linux-4.1.26.orig/arch/alpha/mm/fault.c linux-4.1.26/arch/alpha/mm/fau
extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
-@@ -107,7 +106,7 @@
+@@ -107,7 +106,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* If we're in an interrupt context, or have no user context,
we must not take the fault. */
@@ -20,10 +327,11 @@ diff -Nur linux-4.1.26.orig/arch/alpha/mm/fault.c linux-4.1.26/arch/alpha/mm/fau
goto no_context;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
-diff -Nur linux-4.1.26.orig/arch/arc/include/asm/futex.h linux-4.1.26/arch/arc/include/asm/futex.h
---- linux-4.1.26.orig/arch/arc/include/asm/futex.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arc/include/asm/futex.h 2016-06-19 15:30:54.915151887 +0200
-@@ -53,7 +53,7 @@
+diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
+index 4dc64ddebece..05b5aaf5b0f9 100644
+--- a/arch/arc/include/asm/futex.h
++++ b/arch/arc/include/asm/futex.h
+@@ -53,7 +53,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
@@ -32,7 +340,7 @@ diff -Nur linux-4.1.26.orig/arch/arc/include/asm/futex.h linux-4.1.26/arch/arc/i
switch (op) {
case FUTEX_OP_SET:
-@@ -75,7 +75,7 @@
+@@ -75,7 +75,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
ret = -ENOSYS;
}
@@ -41,7 +349,7 @@ diff -Nur linux-4.1.26.orig/arch/arc/include/asm/futex.h linux-4.1.26/arch/arc/i
if (!ret) {
switch (cmp) {
-@@ -104,7 +104,7 @@
+@@ -104,7 +104,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
return ret;
}
@@ -50,7 +358,7 @@ diff -Nur linux-4.1.26.orig/arch/arc/include/asm/futex.h linux-4.1.26/arch/arc/i
* Notes:
* -Best-Effort: Exchg happens only if compare succeeds.
* If compare fails, returns; leaving retry/looping to upper layers
-@@ -121,7 +121,7 @@
+@@ -121,7 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
@@ -59,7 +367,7 @@ diff -Nur linux-4.1.26.orig/arch/arc/include/asm/futex.h linux-4.1.26/arch/arc/i
/* TBD : can use llock/scond */
__asm__ __volatile__(
-@@ -142,7 +142,7 @@
+@@ -142,7 +142,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
: "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
: "cc", "memory");
@@ -68,10 +376,11 @@ diff -Nur linux-4.1.26.orig/arch/arc/include/asm/futex.h linux-4.1.26/arch/arc/i
*uval = val;
return val;
-diff -Nur linux-4.1.26.orig/arch/arc/mm/fault.c linux-4.1.26/arch/arc/mm/fault.c
---- linux-4.1.26.orig/arch/arc/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arc/mm/fault.c 2016-06-19 15:30:54.915151887 +0200
-@@ -86,7 +86,7 @@
+diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
+index 6a2e006cbcce..d948e4e9d89c 100644
+--- a/arch/arc/mm/fault.c
++++ b/arch/arc/mm/fault.c
+@@ -86,7 +86,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -80,10 +389,32 @@ diff -Nur linux-4.1.26.orig/arch/arc/mm/fault.c linux-4.1.26/arch/arc/mm/fault.c
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.26.orig/arch/arm/include/asm/cmpxchg.h linux-4.1.26/arch/arm/include/asm/cmpxchg.h
---- linux-4.1.26.orig/arch/arm/include/asm/cmpxchg.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/include/asm/cmpxchg.h 2016-06-19 15:30:54.919152041 +0200
-@@ -129,6 +129,8 @@
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 19f4cc634b0e..e16a259177d4 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -31,7 +31,7 @@ config ARM
+ select HARDIRQS_SW_RESEND
+ select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+ select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
++ select HAVE_ARCH_JUMP_LABEL if (!XIP_KERNEL && !PREEMPT_RT_BASE)
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+ select HAVE_ARCH_TRACEHOOK
+@@ -66,6 +66,7 @@ config ARM
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
+index abb2c3769b01..2386e9745ba4 100644
+--- a/arch/arm/include/asm/cmpxchg.h
++++ b/arch/arm/include/asm/cmpxchg.h
+@@ -129,6 +129,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#else /* min ARCH >= ARMv6 */
@@ -92,10 +423,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/include/asm/cmpxchg.h linux-4.1.26/arch/arm
extern void __bad_cmpxchg(volatile void *ptr, int size);
/*
-diff -Nur linux-4.1.26.orig/arch/arm/include/asm/futex.h linux-4.1.26/arch/arm/include/asm/futex.h
---- linux-4.1.26.orig/arch/arm/include/asm/futex.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/include/asm/futex.h 2016-06-19 15:30:54.919152041 +0200
-@@ -93,6 +93,7 @@
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index 4e78065a16aa..5eed82809d82 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -93,6 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -103,7 +435,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/include/asm/futex.h linux-4.1.26/arch/arm/i
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
-@@ -104,6 +105,8 @@
+@@ -104,6 +105,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "cc", "memory");
*uval = val;
@@ -112,7 +444,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/include/asm/futex.h linux-4.1.26/arch/arm/i
return ret;
}
-@@ -124,7 +127,10 @@
+@@ -124,7 +127,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -124,7 +456,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/include/asm/futex.h linux-4.1.26/arch/arm/i
switch (op) {
case FUTEX_OP_SET:
-@@ -146,7 +152,10 @@
+@@ -146,7 +152,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
ret = -ENOSYS;
}
@@ -136,9 +468,10 @@ diff -Nur linux-4.1.26.orig/arch/arm/include/asm/futex.h linux-4.1.26/arch/arm/i
if (!ret) {
switch (cmp) {
-diff -Nur linux-4.1.26.orig/arch/arm/include/asm/switch_to.h linux-4.1.26/arch/arm/include/asm/switch_to.h
---- linux-4.1.26.orig/arch/arm/include/asm/switch_to.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/include/asm/switch_to.h 2016-06-19 15:30:54.919152041 +0200
+diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
+index c99e259469f7..f3e3d800c407 100644
+--- a/arch/arm/include/asm/switch_to.h
++++ b/arch/arm/include/asm/switch_to.h
@@ -3,6 +3,13 @@
#include <linux/thread_info.h>
@@ -153,7 +486,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/include/asm/switch_to.h linux-4.1.26/arch/a
/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
* during a TLB maintenance operation, so execute an inner-shareable dsb
-@@ -22,6 +29,7 @@
+@@ -22,6 +29,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
@@ -161,10 +494,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/include/asm/switch_to.h linux-4.1.26/arch/a
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
-diff -Nur linux-4.1.26.orig/arch/arm/include/asm/thread_info.h linux-4.1.26/arch/arm/include/asm/thread_info.h
---- linux-4.1.26.orig/arch/arm/include/asm/thread_info.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/include/asm/thread_info.h 2016-06-19 15:30:54.919152041 +0200
-@@ -50,6 +50,7 @@
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index bd32eded3e50..b5a616376f60 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -50,6 +50,7 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
@@ -172,7 +506,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/include/asm/thread_info.h linux-4.1.26/arch
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
-@@ -147,6 +148,7 @@
+@@ -147,6 +148,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
@@ -180,7 +514,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/include/asm/thread_info.h linux-4.1.26/arch
#define TIF_UPROBE 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
-@@ -160,6 +162,7 @@
+@@ -160,6 +162,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -188,30 +522,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/include/asm/thread_info.h linux-4.1.26/arch
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-diff -Nur linux-4.1.26.orig/arch/arm/Kconfig linux-4.1.26/arch/arm/Kconfig
---- linux-4.1.26.orig/arch/arm/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/Kconfig 2016-06-19 15:30:54.919152041 +0200
-@@ -31,7 +31,7 @@
- select HARDIRQS_SW_RESEND
- select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
- select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
-+ select HAVE_ARCH_JUMP_LABEL if (!XIP_KERNEL && !PREEMPT_RT_BASE)
- select HAVE_ARCH_KGDB
- select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
- select HAVE_ARCH_TRACEHOOK
-@@ -66,6 +66,7 @@
- select HAVE_PERF_EVENTS
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
-+ select HAVE_PREEMPT_LAZY
- select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_SYSCALL_TRACEPOINTS
-diff -Nur linux-4.1.26.orig/arch/arm/kernel/asm-offsets.c linux-4.1.26/arch/arm/kernel/asm-offsets.c
---- linux-4.1.26.orig/arch/arm/kernel/asm-offsets.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/kernel/asm-offsets.c 2016-06-19 15:30:54.919152041 +0200
-@@ -65,6 +65,7 @@
+diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
+index 871b8267d211..4dbe70de7318 100644
+--- a/arch/arm/kernel/asm-offsets.c
++++ b/arch/arm/kernel/asm-offsets.c
+@@ -65,6 +65,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -219,10 +534,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/asm-offsets.c linux-4.1.26/arch/arm/
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -Nur linux-4.1.26.orig/arch/arm/kernel/entry-armv.S linux-4.1.26/arch/arm/kernel/entry-armv.S
---- linux-4.1.26.orig/arch/arm/kernel/entry-armv.S 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/kernel/entry-armv.S 2016-06-19 15:30:54.919152041 +0200
-@@ -208,11 +208,18 @@
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 570306c49406..797a13d959b7 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -208,11 +208,18 @@ __irq_svc:
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -243,19 +559,27 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/entry-armv.S linux-4.1.26/arch/arm/k
#endif
svc_exit r5, irq = 1 @ return from exception
-@@ -227,6 +234,8 @@
+@@ -227,8 +234,14 @@ svc_preempt:
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
+ bne 1b
+ tst r0, #_TIF_NEED_RESCHED_LAZY
reteq r8 @ go again
- b 1b
+- b 1b
++ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r0, #0 @ if preempt lazy count != 0
++ beq 1b
++ ret r8 @ go again
++
#endif
-diff -Nur linux-4.1.26.orig/arch/arm/kernel/process.c linux-4.1.26/arch/arm/kernel/process.c
---- linux-4.1.26.orig/arch/arm/kernel/process.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/kernel/process.c 2016-06-19 15:30:54.919152041 +0200
-@@ -290,6 +290,30 @@
+
+ __und_fault:
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index f192a2a41719..649247ac00e6 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -290,6 +290,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}
#ifdef CONFIG_MMU
@@ -286,10 +610,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/process.c linux-4.1.26/arch/arm/kern
#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
-diff -Nur linux-4.1.26.orig/arch/arm/kernel/signal.c linux-4.1.26/arch/arm/kernel/signal.c
---- linux-4.1.26.orig/arch/arm/kernel/signal.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/kernel/signal.c 2016-06-19 15:30:54.919152041 +0200
-@@ -568,7 +568,8 @@
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 586eef26203d..25bd12ef0b36 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -568,7 +568,8 @@ asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
do {
@@ -299,10 +624,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/signal.c linux-4.1.26/arch/arm/kerne
schedule();
} else {
if (unlikely(!user_mode(regs)))
-diff -Nur linux-4.1.26.orig/arch/arm/kernel/smp.c linux-4.1.26/arch/arm/kernel/smp.c
---- linux-4.1.26.orig/arch/arm/kernel/smp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/kernel/smp.c 2016-06-19 15:30:54.919152041 +0200
-@@ -213,8 +213,6 @@
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index f11d82527076..e561aef093c7 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -213,8 +213,6 @@ int __cpu_disable(void)
flush_cache_louis();
local_flush_tlb_all();
@@ -311,7 +637,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/smp.c linux-4.1.26/arch/arm/kernel/s
return 0;
}
-@@ -230,6 +228,9 @@
+@@ -230,6 +228,9 @@ void __cpu_die(unsigned int cpu)
pr_err("CPU%u: cpu didn't die\n", cpu);
return;
}
@@ -321,10 +647,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/smp.c linux-4.1.26/arch/arm/kernel/s
pr_notice("CPU%u: shutdown\n", cpu);
/*
-diff -Nur linux-4.1.26.orig/arch/arm/kernel/unwind.c linux-4.1.26/arch/arm/kernel/unwind.c
---- linux-4.1.26.orig/arch/arm/kernel/unwind.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/kernel/unwind.c 2016-06-19 15:30:54.919152041 +0200
-@@ -93,7 +93,7 @@
+diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
+index 0bee233fef9a..314cfb232a63 100644
+--- a/arch/arm/kernel/unwind.c
++++ b/arch/arm/kernel/unwind.c
+@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
@@ -333,7 +660,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/unwind.c linux-4.1.26/arch/arm/kerne
static LIST_HEAD(unwind_tables);
/* Convert a prel31 symbol to an absolute address */
-@@ -201,7 +201,7 @@
+@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
/* module unwind tables */
struct unwind_table *table;
@@ -342,7 +669,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/unwind.c linux-4.1.26/arch/arm/kerne
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->begin_addr &&
addr < table->end_addr) {
-@@ -213,7 +213,7 @@
+@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
break;
}
}
@@ -351,7 +678,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/unwind.c linux-4.1.26/arch/arm/kerne
}
pr_debug("%s: idx = %p\n", __func__, idx);
-@@ -529,9 +529,9 @@
+@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
@@ -363,7 +690,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/unwind.c linux-4.1.26/arch/arm/kerne
return tab;
}
-@@ -543,9 +543,9 @@
+@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
if (!tab)
return;
@@ -375,10 +702,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/kernel/unwind.c linux-4.1.26/arch/arm/kerne
kfree(tab);
}
-diff -Nur linux-4.1.26.orig/arch/arm/kvm/arm.c linux-4.1.26/arch/arm/kvm/arm.c
---- linux-4.1.26.orig/arch/arm/kvm/arm.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/kvm/arm.c 2016-06-19 15:30:54.919152041 +0200
-@@ -474,9 +474,9 @@
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 87b2663a5564..ce5e58d76cf9 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -473,9 +473,9 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
static void vcpu_pause(struct kvm_vcpu *vcpu)
{
@@ -390,10 +718,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/kvm/arm.c linux-4.1.26/arch/arm/kvm/arm.c
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-diff -Nur linux-4.1.26.orig/arch/arm/kvm/psci.c linux-4.1.26/arch/arm/kvm/psci.c
---- linux-4.1.26.orig/arch/arm/kvm/psci.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/kvm/psci.c 2016-06-19 15:30:54.919152041 +0200
-@@ -68,7 +68,7 @@
+diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
+index 531e922486b2..e24f0461ea2d 100644
+--- a/arch/arm/kvm/psci.c
++++ b/arch/arm/kvm/psci.c
+@@ -68,7 +68,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu = NULL;
@@ -402,7 +731,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/kvm/psci.c linux-4.1.26/arch/arm/kvm/psci.c
unsigned long cpu_id;
unsigned long context_id;
phys_addr_t target_pc;
-@@ -117,7 +117,7 @@
+@@ -117,7 +117,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
smp_mb(); /* Make sure the above is visible */
wq = kvm_arch_vcpu_wq(vcpu);
@@ -411,10 +740,242 @@ diff -Nur linux-4.1.26.orig/arch/arm/kvm/psci.c linux-4.1.26/arch/arm/kvm/psci.c
return PSCI_RET_SUCCESS;
}
-diff -Nur linux-4.1.26.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.26/arch/arm/mach-exynos/platsmp.c
---- linux-4.1.26.orig/arch/arm/mach-exynos/platsmp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-exynos/platsmp.c 2016-06-19 15:30:54.919152041 +0200
-@@ -231,7 +231,7 @@
+diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
+index eaf58f88ef5d..8d3cb458a99c 100644
+--- a/arch/arm/mach-at91/at91rm9200.c
++++ b/arch/arm/mach-at91/at91rm9200.c
+@@ -13,7 +13,6 @@
+ #include <linux/of_platform.h>
+
+ #include <asm/mach/arch.h>
+-#include <asm/system_misc.h>
+
+ #include "generic.h"
+ #include "soc.h"
+@@ -34,7 +33,6 @@ static void __init at91rm9200_dt_device_init(void)
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+
+- arm_pm_idle = at91rm9200_idle;
+ at91rm9200_pm_init();
+ }
+
+diff --git a/arch/arm/mach-at91/at91sam9.c b/arch/arm/mach-at91/at91sam9.c
+index e47a2093a0e7..d2bede665a1b 100644
+--- a/arch/arm/mach-at91/at91sam9.c
++++ b/arch/arm/mach-at91/at91sam9.c
+@@ -62,8 +62,6 @@ static void __init at91sam9_common_init(void)
+ soc_dev = soc_device_to_device(soc);
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+-
+- arm_pm_idle = at91sam9_idle;
+ }
+
+ static void __init at91sam9_dt_device_init(void)
+diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
+index b0fa7dc7286d..28ca57a2060f 100644
+--- a/arch/arm/mach-at91/generic.h
++++ b/arch/arm/mach-at91/generic.h
+@@ -11,27 +11,18 @@
+ #ifndef _AT91_GENERIC_H
+ #define _AT91_GENERIC_H
+
+-#include <linux/of.h>
+-#include <linux/reboot.h>
+-
+- /* Map io */
+-extern void __init at91_map_io(void);
+-extern void __init at91_alt_map_io(void);
+-
+-/* idle */
+-extern void at91rm9200_idle(void);
+-extern void at91sam9_idle(void);
+-
+ #ifdef CONFIG_PM
+ extern void __init at91rm9200_pm_init(void);
+ extern void __init at91sam9260_pm_init(void);
+ extern void __init at91sam9g45_pm_init(void);
+ extern void __init at91sam9x5_pm_init(void);
++extern void __init sama5_pm_init(void);
+ #else
+ static inline void __init at91rm9200_pm_init(void) { }
+ static inline void __init at91sam9260_pm_init(void) { }
+ static inline void __init at91sam9g45_pm_init(void) { }
+ static inline void __init at91sam9x5_pm_init(void) { }
++static inline void __init sama5_pm_init(void) { }
+ #endif
+
+ #endif /* _AT91_GENERIC_H */
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index 5062699cbb12..3be82cf983dd 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -31,10 +31,13 @@
+ #include <asm/mach/irq.h>
+ #include <asm/fncpy.h>
+ #include <asm/cacheflush.h>
++#include <asm/system_misc.h>
+
+ #include "generic.h"
+ #include "pm.h"
+
++static void __iomem *pmc;
++
+ /*
+ * FIXME: this is needed to communicate between the pinctrl driver and
+ * the PM implementation in the machine. Possibly part of the PM
+@@ -85,7 +88,7 @@ static int at91_pm_verify_clocks(void)
+ unsigned long scsr;
+ int i;
+
+- scsr = at91_pmc_read(AT91_PMC_SCSR);
++ scsr = readl(pmc + AT91_PMC_SCSR);
+
+ /* USB must not be using PLLB */
+ if ((scsr & at91_pm_data.uhp_udp_mask) != 0) {
+@@ -99,8 +102,7 @@ static int at91_pm_verify_clocks(void)
+
+ if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
+ continue;
+-
+- css = at91_pmc_read(AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
++ css = readl(pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
+ if (css != AT91_PMC_CSS_SLOW) {
+ pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
+ return 0;
+@@ -143,8 +145,8 @@ static void at91_pm_suspend(suspend_state_t state)
+ flush_cache_all();
+ outer_disable();
+
+- at91_suspend_sram_fn(at91_pmc_base, at91_ramc_base[0],
+- at91_ramc_base[1], pm_data);
++ at91_suspend_sram_fn(pmc, at91_ramc_base[0],
++ at91_ramc_base[1], pm_data);
+
+ outer_resume();
+ }
+@@ -348,6 +350,21 @@ static __init void at91_dt_ramc(void)
+ at91_pm_set_standby(standby);
+ }
+
++void at91rm9200_idle(void)
++{
++ /*
++ * Disable the processor clock. The processor will be automatically
++ * re-enabled by an interrupt or by a reset.
++ */
++ writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
++}
++
++void at91sam9_idle(void)
++{
++ writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
++ cpu_do_idle();
++}
++
+ static void __init at91_pm_sram_init(void)
+ {
+ struct gen_pool *sram_pool;
+@@ -394,13 +411,36 @@ static void __init at91_pm_sram_init(void)
+ &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
+ }
+
+-static void __init at91_pm_init(void)
++static const struct of_device_id atmel_pmc_ids[] __initconst = {
++ { .compatible = "atmel,at91rm9200-pmc" },
++ { .compatible = "atmel,at91sam9260-pmc" },
++ { .compatible = "atmel,at91sam9g45-pmc" },
++ { .compatible = "atmel,at91sam9n12-pmc" },
++ { .compatible = "atmel,at91sam9x5-pmc" },
++ { .compatible = "atmel,sama5d3-pmc" },
++ { .compatible = "atmel,sama5d2-pmc" },
++ { /* sentinel */ },
++};
++
++static void __init at91_pm_init(void (*pm_idle)(void))
+ {
+- at91_pm_sram_init();
++ struct device_node *pmc_np;
+
+ if (at91_cpuidle_device.dev.platform_data)
+ platform_device_register(&at91_cpuidle_device);
+
++ pmc_np = of_find_matching_node(NULL, atmel_pmc_ids);
++ pmc = of_iomap(pmc_np, 0);
++ if (!pmc) {
++ pr_err("AT91: PM not supported, PMC not found\n");
++ return;
++ }
++
++ if (pm_idle)
++ arm_pm_idle = pm_idle;
++
++ at91_pm_sram_init();
++
+ if (at91_suspend_sram_fn)
+ suspend_set_ops(&at91_pm_ops);
+ else
+@@ -419,7 +459,7 @@ void __init at91rm9200_pm_init(void)
+ at91_pm_data.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP;
+ at91_pm_data.memctrl = AT91_MEMCTRL_MC;
+
+- at91_pm_init();
++ at91_pm_init(at91rm9200_idle);
+ }
+
+ void __init at91sam9260_pm_init(void)
+@@ -427,7 +467,7 @@ void __init at91sam9260_pm_init(void)
+ at91_dt_ramc();
+ at91_pm_data.memctrl = AT91_MEMCTRL_SDRAMC;
+ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
+- return at91_pm_init();
++ at91_pm_init(at91sam9_idle);
+ }
+
+ void __init at91sam9g45_pm_init(void)
+@@ -435,7 +475,7 @@ void __init at91sam9g45_pm_init(void)
+ at91_dt_ramc();
+ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP;
+ at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
+- return at91_pm_init();
++ at91_pm_init(at91sam9_idle);
+ }
+
+ void __init at91sam9x5_pm_init(void)
+@@ -443,5 +483,13 @@ void __init at91sam9x5_pm_init(void)
+ at91_dt_ramc();
+ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
+ at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
+- return at91_pm_init();
++ at91_pm_init(at91sam9_idle);
++}
++
++void __init sama5_pm_init(void)
++{
++ at91_dt_ramc();
++ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
++ at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
++ at91_pm_init(NULL);
+ }
+diff --git a/arch/arm/mach-at91/sama5.c b/arch/arm/mach-at91/sama5.c
+index 41d829d8e7d5..3755da6decf5 100644
+--- a/arch/arm/mach-at91/sama5.c
++++ b/arch/arm/mach-at91/sama5.c
+@@ -49,7 +49,7 @@ static void __init sama5_dt_device_init(void)
+ soc_dev = soc_device_to_device(soc);
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+- at91sam9x5_pm_init();
++ sama5_pm_init();
+ }
+
+ static const char *sama5_dt_board_compat[] __initconst = {
+diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
+index a825bca2a2b6..4619e228df41 100644
+--- a/arch/arm/mach-exynos/platsmp.c
++++ b/arch/arm/mach-exynos/platsmp.c
+@@ -231,7 +231,7 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)(S5P_VA_SCU);
}
@@ -423,7 +984,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.26/arch/arm
static void exynos_secondary_init(unsigned int cpu)
{
-@@ -244,8 +244,8 @@
+@@ -244,8 +244,8 @@ static void exynos_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
@@ -434,7 +995,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.26/arch/arm
}
static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -259,7 +259,7 @@
+@@ -259,7 +259,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Set synchronisation state between this boot processor
* and the secondary one
*/
@@ -443,7 +1004,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.26/arch/arm
/*
* The secondary processor is waiting to be released from
-@@ -286,7 +286,7 @@
+@@ -286,7 +286,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
if (timeout == 0) {
printk(KERN_ERR "cpu1 power enable failed");
@@ -452,7 +1013,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.26/arch/arm
return -ETIMEDOUT;
}
}
-@@ -342,7 +342,7 @@
+@@ -342,7 +342,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
* calibrations, then wait for it to finish
*/
fail:
@@ -461,9 +1022,10 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.26/arch/arm
return pen_release != -1 ? ret : 0;
}
-diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/mach-hisi/platmcpm.c
---- linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-hisi/platmcpm.c 2016-06-19 15:30:54.923152195 +0200
+diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
+index 280f3f14f77c..bc2ed95c0e62 100644
+--- a/arch/arm/mach-hisi/platmcpm.c
++++ b/arch/arm/mach-hisi/platmcpm.c
@@ -57,7 +57,7 @@
static void __iomem *sysctrl, *fabric;
@@ -473,7 +1035,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/
static u32 fabric_phys_addr;
/*
* [0]: bootwrapper physical address
-@@ -104,7 +104,7 @@
+@@ -104,7 +104,7 @@ static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster)
if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
return -EINVAL;
@@ -482,7 +1044,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/
if (hip04_cpu_table[cluster][cpu])
goto out;
-@@ -133,7 +133,7 @@
+@@ -133,7 +133,7 @@ static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster)
udelay(20);
out:
hip04_cpu_table[cluster][cpu]++;
@@ -491,7 +1053,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/
return 0;
}
-@@ -149,7 +149,7 @@
+@@ -149,7 +149,7 @@ static void hip04_mcpm_power_down(void)
__mcpm_cpu_going_down(cpu, cluster);
@@ -500,7 +1062,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/
BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
hip04_cpu_table[cluster][cpu]--;
if (hip04_cpu_table[cluster][cpu] == 1) {
-@@ -162,7 +162,7 @@
+@@ -162,7 +162,7 @@ static void hip04_mcpm_power_down(void)
last_man = hip04_cluster_is_down(cluster);
if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
@@ -509,7 +1071,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/
/* Since it's Cortex A15, disable L2 prefetching. */
asm volatile(
"mcr p15, 1, %0, c15, c0, 3 \n\t"
-@@ -173,7 +173,7 @@
+@@ -173,7 +173,7 @@ static void hip04_mcpm_power_down(void)
hip04_set_snoop_filter(cluster, 0);
__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
} else {
@@ -518,7 +1080,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/
v7_exit_coherency_flush(louis);
}
-@@ -192,7 +192,7 @@
+@@ -192,7 +192,7 @@ static int hip04_mcpm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
count = TIMEOUT_MSEC / POLL_MSEC;
@@ -527,7 +1089,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/
for (tries = 0; tries < count; tries++) {
if (hip04_cpu_table[cluster][cpu]) {
ret = -EBUSY;
-@@ -202,10 +202,10 @@
+@@ -202,10 +202,10 @@ static int hip04_mcpm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
if (data & CORE_WFI_STATUS(cpu))
break;
@@ -540,7 +1102,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/
}
if (tries >= count)
goto err;
-@@ -220,10 +220,10 @@
+@@ -220,10 +220,10 @@ static int hip04_mcpm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
}
if (tries >= count)
goto err;
@@ -553,7 +1115,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/
return ret;
}
-@@ -235,10 +235,10 @@
+@@ -235,10 +235,10 @@ static void hip04_mcpm_powered_up(void)
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
@@ -566,10 +1128,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.26/arch/arm/
}
static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
-diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/gpio.c linux-4.1.26/arch/arm/mach-omap2/gpio.c
---- linux-4.1.26.orig/arch/arm/mach-omap2/gpio.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-omap2/gpio.c 2016-06-19 15:30:54.923152195 +0200
-@@ -130,7 +130,6 @@
+diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
+index 7a577145b68b..689a1af47c80 100644
+--- a/arch/arm/mach-omap2/gpio.c
++++ b/arch/arm/mach-omap2/gpio.c
+@@ -130,7 +130,6 @@ static int __init omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
}
pwrdm = omap_hwmod_get_pwrdm(oh);
@@ -577,9 +1140,10 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/gpio.c linux-4.1.26/arch/arm/mac
pdev = omap_device_build(name, id - 1, oh, pdata, sizeof(*pdata));
kfree(pdata);
-diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/omap-smp.c linux-4.1.26/arch/arm/mach-omap2/omap-smp.c
---- linux-4.1.26.orig/arch/arm/mach-omap2/omap-smp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-omap2/omap-smp.c 2016-06-19 15:30:54.923152195 +0200
+diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
+index 5305ec7341ec..19732b56088b 100644
+--- a/arch/arm/mach-omap2/omap-smp.c
++++ b/arch/arm/mach-omap2/omap-smp.c
@@ -43,7 +43,7 @@
/* SCU base address */
static void __iomem *scu_base;
@@ -589,7 +1153,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/omap-smp.c linux-4.1.26/arch/arm
void __iomem *omap4_get_scu_base(void)
{
-@@ -74,8 +74,8 @@
+@@ -74,8 +74,8 @@ static void omap4_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
@@ -600,7 +1164,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/omap-smp.c linux-4.1.26/arch/arm
}
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -89,7 +89,7 @@
+@@ -89,7 +89,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Set synchronisation state between this boot processor
* and the secondary one
*/
@@ -609,7 +1173,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/omap-smp.c linux-4.1.26/arch/arm
/*
* Update the AuxCoreBoot0 with boot state for secondary core.
-@@ -166,7 +166,7 @@
+@@ -166,7 +166,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@@ -618,10 +1182,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/omap-smp.c linux-4.1.26/arch/arm
return 0;
}
-diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/powerdomain.c linux-4.1.26/arch/arm/mach-omap2/powerdomain.c
---- linux-4.1.26.orig/arch/arm/mach-omap2/powerdomain.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-omap2/powerdomain.c 2016-06-19 15:30:54.923152195 +0200
-@@ -1166,43 +1166,3 @@
+diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
+index 78af6d8cf2e2..ef4227ffa3b6 100644
+--- a/arch/arm/mach-omap2/powerdomain.c
++++ b/arch/arm/mach-omap2/powerdomain.c
+@@ -1166,43 +1166,3 @@ int pwrdm_get_context_loss_count(struct powerdomain *pwrdm)
return count;
}
@@ -665,10 +1230,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/powerdomain.c linux-4.1.26/arch/
-
- return 0;
-}
-diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/powerdomain.h linux-4.1.26/arch/arm/mach-omap2/powerdomain.h
---- linux-4.1.26.orig/arch/arm/mach-omap2/powerdomain.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-omap2/powerdomain.h 2016-06-19 15:30:54.923152195 +0200
-@@ -244,7 +244,6 @@
+diff --git a/arch/arm/mach-omap2/powerdomain.h b/arch/arm/mach-omap2/powerdomain.h
+index 28a796ce07d7..5e0c033a21db 100644
+--- a/arch/arm/mach-omap2/powerdomain.h
++++ b/arch/arm/mach-omap2/powerdomain.h
+@@ -244,7 +244,6 @@ int pwrdm_state_switch(struct powerdomain *pwrdm);
int pwrdm_pre_transition(struct powerdomain *pwrdm);
int pwrdm_post_transition(struct powerdomain *pwrdm);
int pwrdm_get_context_loss_count(struct powerdomain *pwrdm);
@@ -676,9 +1242,10 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-omap2/powerdomain.h linux-4.1.26/arch/
extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u8 state);
-diff -Nur linux-4.1.26.orig/arch/arm/mach-prima2/platsmp.c linux-4.1.26/arch/arm/mach-prima2/platsmp.c
---- linux-4.1.26.orig/arch/arm/mach-prima2/platsmp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-prima2/platsmp.c 2016-06-19 15:30:54.923152195 +0200
+diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
+index e46c91094dde..dcb3ed0c26da 100644
+--- a/arch/arm/mach-prima2/platsmp.c
++++ b/arch/arm/mach-prima2/platsmp.c
@@ -22,7 +22,7 @@
static void __iomem *clk_base;
@@ -688,7 +1255,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-prima2/platsmp.c linux-4.1.26/arch/arm
static void sirfsoc_secondary_init(unsigned int cpu)
{
-@@ -36,8 +36,8 @@
+@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
@@ -699,7 +1266,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-prima2/platsmp.c linux-4.1.26/arch/arm
}
static const struct of_device_id clk_ids[] = {
-@@ -75,7 +75,7 @@
+@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
/* make sure write buffer is drained */
mb();
@@ -708,7 +1275,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-prima2/platsmp.c linux-4.1.26/arch/arm
/*
* The secondary processor is waiting to be released from
-@@ -107,7 +107,7 @@
+@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@@ -717,9 +1284,10 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-prima2/platsmp.c linux-4.1.26/arch/arm
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-4.1.26.orig/arch/arm/mach-qcom/platsmp.c linux-4.1.26/arch/arm/mach-qcom/platsmp.c
---- linux-4.1.26.orig/arch/arm/mach-qcom/platsmp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-qcom/platsmp.c 2016-06-19 15:30:54.923152195 +0200
+diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
+index 5cde63a64b34..82c9b9145c3e 100644
+--- a/arch/arm/mach-qcom/platsmp.c
++++ b/arch/arm/mach-qcom/platsmp.c
@@ -46,7 +46,7 @@
extern void secondary_startup_arm(void);
@@ -729,7 +1297,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-qcom/platsmp.c linux-4.1.26/arch/arm/m
#ifdef CONFIG_HOTPLUG_CPU
static void __ref qcom_cpu_die(unsigned int cpu)
-@@ -60,8 +60,8 @@
+@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
@@ -740,7 +1308,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-qcom/platsmp.c linux-4.1.26/arch/arm/m
}
static int scss_release_secondary(unsigned int cpu)
-@@ -284,7 +284,7 @@
+@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
* set synchronisation state between this boot processor
* and the secondary one
*/
@@ -749,7 +1317,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-qcom/platsmp.c linux-4.1.26/arch/arm/m
/*
* Send the secondary CPU a soft interrupt, thereby causing
-@@ -297,7 +297,7 @@
+@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@@ -758,10 +1326,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-qcom/platsmp.c linux-4.1.26/arch/arm/m
return ret;
}
-diff -Nur linux-4.1.26.orig/arch/arm/mach-spear/platsmp.c linux-4.1.26/arch/arm/mach-spear/platsmp.c
---- linux-4.1.26.orig/arch/arm/mach-spear/platsmp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-spear/platsmp.c 2016-06-19 15:30:54.923152195 +0200
-@@ -32,7 +32,7 @@
+diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
+index fd4297713d67..b0553b2c2d53 100644
+--- a/arch/arm/mach-spear/platsmp.c
++++ b/arch/arm/mach-spear/platsmp.c
+@@ -32,7 +32,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
@@ -770,7 +1339,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-spear/platsmp.c linux-4.1.26/arch/arm/
static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
-@@ -47,8 +47,8 @@
+@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
@@ -781,7 +1350,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-spear/platsmp.c linux-4.1.26/arch/arm/
}
static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -59,7 +59,7 @@
+@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
* set synchronisation state between this boot processor
* and the secondary one
*/
@@ -790,7 +1359,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-spear/platsmp.c linux-4.1.26/arch/arm/
/*
* The secondary processor is waiting to be released from
-@@ -84,7 +84,7 @@
+@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@@ -799,10 +1368,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-spear/platsmp.c linux-4.1.26/arch/arm/
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-4.1.26.orig/arch/arm/mach-sti/platsmp.c linux-4.1.26/arch/arm/mach-sti/platsmp.c
---- linux-4.1.26.orig/arch/arm/mach-sti/platsmp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-sti/platsmp.c 2016-06-19 15:30:54.923152195 +0200
-@@ -34,7 +34,7 @@
+diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
+index d4b624f8dfcb..56d4028122f5 100644
+--- a/arch/arm/mach-sti/platsmp.c
++++ b/arch/arm/mach-sti/platsmp.c
+@@ -34,7 +34,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
@@ -811,7 +1381,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-sti/platsmp.c linux-4.1.26/arch/arm/ma
static void sti_secondary_init(unsigned int cpu)
{
-@@ -49,8 +49,8 @@
+@@ -49,8 +49,8 @@ static void sti_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
@@ -822,7 +1392,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-sti/platsmp.c linux-4.1.26/arch/arm/ma
}
static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -61,7 +61,7 @@
+@@ -61,7 +61,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
* set synchronisation state between this boot processor
* and the secondary one
*/
@@ -831,7 +1401,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-sti/platsmp.c linux-4.1.26/arch/arm/ma
/*
* The secondary processor is waiting to be released from
-@@ -92,7 +92,7 @@
+@@ -92,7 +92,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@@ -840,10 +1410,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-sti/platsmp.c linux-4.1.26/arch/arm/ma
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-4.1.26.orig/arch/arm/mach-ux500/platsmp.c linux-4.1.26/arch/arm/mach-ux500/platsmp.c
---- linux-4.1.26.orig/arch/arm/mach-ux500/platsmp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mach-ux500/platsmp.c 2016-06-19 15:30:54.923152195 +0200
-@@ -51,7 +51,7 @@
+diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
+index a44967f3168c..3af22a4836bf 100644
+--- a/arch/arm/mach-ux500/platsmp.c
++++ b/arch/arm/mach-ux500/platsmp.c
+@@ -51,7 +51,7 @@ static void __iomem *scu_base_addr(void)
return NULL;
}
@@ -852,7 +1423,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-ux500/platsmp.c linux-4.1.26/arch/arm/
static void ux500_secondary_init(unsigned int cpu)
{
-@@ -64,8 +64,8 @@
+@@ -64,8 +64,8 @@ static void ux500_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
@@ -863,7 +1434,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-ux500/platsmp.c linux-4.1.26/arch/arm/
}
static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -76,7 +76,7 @@
+@@ -76,7 +76,7 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
* set synchronisation state between this boot processor
* and the secondary one
*/
@@ -872,7 +1443,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-ux500/platsmp.c linux-4.1.26/arch/arm/
/*
* The secondary processor is waiting to be released from
-@@ -97,7 +97,7 @@
+@@ -97,7 +97,7 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@@ -881,10 +1452,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/mach-ux500/platsmp.c linux-4.1.26/arch/arm/
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-4.1.26.orig/arch/arm/mm/fault.c linux-4.1.26/arch/arm/mm/fault.c
---- linux-4.1.26.orig/arch/arm/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mm/fault.c 2016-06-19 15:30:54.923152195 +0200
-@@ -276,7 +276,7 @@
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 6333d9c17875..62016e3e4a9c 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -276,7 +276,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -893,7 +1465,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mm/fault.c linux-4.1.26/arch/arm/mm/fault.c
goto no_context;
if (user_mode(regs))
-@@ -430,6 +430,9 @@
+@@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
@@ -903,7 +1475,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mm/fault.c linux-4.1.26/arch/arm/mm/fault.c
if (user_mode(regs))
goto bad_area;
-@@ -497,6 +500,9 @@
+@@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@@ -913,10 +1485,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/mm/fault.c linux-4.1.26/arch/arm/mm/fault.c
do_bad_area(addr, fsr, regs);
return 0;
}
-diff -Nur linux-4.1.26.orig/arch/arm/mm/highmem.c linux-4.1.26/arch/arm/mm/highmem.c
---- linux-4.1.26.orig/arch/arm/mm/highmem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/mm/highmem.c 2016-06-19 15:30:54.923152195 +0200
-@@ -54,11 +54,13 @@
+diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
+index b98895d9fe57..4050e9d99d6b 100644
+--- a/arch/arm/mm/highmem.c
++++ b/arch/arm/mm/highmem.c
+@@ -54,11 +54,13 @@ EXPORT_SYMBOL(kunmap);
void *kmap_atomic(struct page *page)
{
@@ -930,7 +1503,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mm/highmem.c linux-4.1.26/arch/arm/mm/highm
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
-@@ -92,7 +94,10 @@
+@@ -92,7 +94,10 @@ void *kmap_atomic(struct page *page)
* in place, so the contained TLB flush ensures the TLB is updated
* with the new mapping.
*/
@@ -942,7 +1515,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mm/highmem.c linux-4.1.26/arch/arm/mm/highm
return (void *)vaddr;
}
-@@ -109,27 +114,33 @@
+@@ -109,27 +114,33 @@ void __kunmap_atomic(void *kvaddr)
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -977,7 +1550,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mm/highmem.c linux-4.1.26/arch/arm/mm/highm
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
-@@ -140,7 +151,10 @@
+@@ -140,7 +151,10 @@ void *kmap_atomic_pfn(unsigned long pfn)
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
@@ -989,7 +1562,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/mm/highmem.c linux-4.1.26/arch/arm/mm/highm
return (void *)vaddr;
}
-@@ -154,3 +168,28 @@
+@@ -154,3 +168,28 @@ struct page *kmap_atomic_to_page(const void *ptr)
return pte_page(get_fixmap_pte(vaddr));
}
@@ -1018,10 +1591,11 @@ diff -Nur linux-4.1.26.orig/arch/arm/mm/highmem.c linux-4.1.26/arch/arm/mm/highm
+ }
+}
+#endif
-diff -Nur linux-4.1.26.orig/arch/arm/plat-versatile/platsmp.c linux-4.1.26/arch/arm/plat-versatile/platsmp.c
---- linux-4.1.26.orig/arch/arm/plat-versatile/platsmp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm/plat-versatile/platsmp.c 2016-06-19 15:30:54.923152195 +0200
-@@ -30,7 +30,7 @@
+diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
+index 53feb90c840c..b4a8d54fc3f3 100644
+--- a/arch/arm/plat-versatile/platsmp.c
++++ b/arch/arm/plat-versatile/platsmp.c
+@@ -30,7 +30,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
@@ -1030,7 +1604,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/plat-versatile/platsmp.c linux-4.1.26/arch/
void versatile_secondary_init(unsigned int cpu)
{
-@@ -43,8 +43,8 @@
+@@ -43,8 +43,8 @@ void versatile_secondary_init(unsigned int cpu)
/*
* Synchronise with the boot thread.
*/
@@ -1041,7 +1615,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/plat-versatile/platsmp.c linux-4.1.26/arch/
}
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -55,7 +55,7 @@
+@@ -55,7 +55,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Set synchronisation state between this boot processor
* and the secondary one
*/
@@ -1050,7 +1624,7 @@ diff -Nur linux-4.1.26.orig/arch/arm/plat-versatile/platsmp.c linux-4.1.26/arch/
/*
* This is really belt and braces; we hold unintended secondary
-@@ -85,7 +85,7 @@
+@@ -85,7 +85,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@@ -1059,10 +1633,35 @@ diff -Nur linux-4.1.26.orig/arch/arm/plat-versatile/platsmp.c linux-4.1.26/arch/
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-4.1.26.orig/arch/arm64/include/asm/futex.h linux-4.1.26/arch/arm64/include/asm/futex.h
---- linux-4.1.26.orig/arch/arm64/include/asm/futex.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm64/include/asm/futex.h 2016-06-19 15:30:54.923152195 +0200
-@@ -58,7 +58,7 @@
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 6f0a3b41b009..09a41259b984 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -69,8 +69,10 @@ config ARM64
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE
++ select HAVE_PREEMPT_LAZY
+ select HAVE_SYSCALL_TRACEPOINTS
+ select IRQ_DOMAIN
++ select IRQ_FORCED_THREADING
+ select MODULES_USE_ELF_RELA
+ select NO_BOOTMEM
+ select OF
+@@ -599,7 +601,7 @@ config XEN_DOM0
+
+ config XEN
+ bool "Xen guest support on ARM64"
+- depends on ARM64 && OF
++ depends on ARM64 && OF && !PREEMPT_RT_FULL
+ select SWIOTLB_XEN
+ help
+ Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index 5f750dc96e0f..74069b3bd919 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -58,7 +58,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -1071,7 +1670,7 @@ diff -Nur linux-4.1.26.orig/arch/arm64/include/asm/futex.h linux-4.1.26/arch/arm
switch (op) {
case FUTEX_OP_SET:
-@@ -85,7 +85,7 @@
+@@ -85,7 +85,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
ret = -ENOSYS;
}
@@ -1080,10 +1679,11 @@ diff -Nur linux-4.1.26.orig/arch/arm64/include/asm/futex.h linux-4.1.26/arch/arm
if (!ret) {
switch (cmp) {
-diff -Nur linux-4.1.26.orig/arch/arm64/include/asm/thread_info.h linux-4.1.26/arch/arm64/include/asm/thread_info.h
---- linux-4.1.26.orig/arch/arm64/include/asm/thread_info.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm64/include/asm/thread_info.h 2016-06-19 15:30:54.923152195 +0200
-@@ -47,6 +47,7 @@
+diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
+index dcd06d18a42a..7d45b00db1b3 100644
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -47,6 +47,7 @@ struct thread_info {
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
int preempt_count; /* 0 => preemptable, <0 => bug */
@@ -1091,7 +1691,7 @@ diff -Nur linux-4.1.26.orig/arch/arm64/include/asm/thread_info.h linux-4.1.26/ar
int cpu; /* cpu */
};
-@@ -101,6 +102,7 @@
+@@ -101,6 +102,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
@@ -1099,7 +1699,7 @@ diff -Nur linux-4.1.26.orig/arch/arm64/include/asm/thread_info.h linux-4.1.26/ar
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
-@@ -117,6 +119,7 @@
+@@ -117,6 +119,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
@@ -1107,33 +1707,11 @@ diff -Nur linux-4.1.26.orig/arch/arm64/include/asm/thread_info.h linux-4.1.26/ar
#define _TIF_NOHZ (1 << TIF_NOHZ)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-diff -Nur linux-4.1.26.orig/arch/arm64/Kconfig linux-4.1.26/arch/arm64/Kconfig
---- linux-4.1.26.orig/arch/arm64/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm64/Kconfig 2016-06-19 15:30:54.923152195 +0200
-@@ -69,8 +69,10 @@
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
- select HAVE_RCU_TABLE_FREE
-+ select HAVE_PREEMPT_LAZY
- select HAVE_SYSCALL_TRACEPOINTS
- select IRQ_DOMAIN
-+ select IRQ_FORCED_THREADING
- select MODULES_USE_ELF_RELA
- select NO_BOOTMEM
- select OF
-@@ -599,7 +601,7 @@
-
- config XEN
- bool "Xen guest support on ARM64"
-- depends on ARM64 && OF
-+ depends on ARM64 && OF && !PREEMPT_RT_FULL
- select SWIOTLB_XEN
- help
- Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
-diff -Nur linux-4.1.26.orig/arch/arm64/kernel/asm-offsets.c linux-4.1.26/arch/arm64/kernel/asm-offsets.c
---- linux-4.1.26.orig/arch/arm64/kernel/asm-offsets.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm64/kernel/asm-offsets.c 2016-06-19 15:30:54.923152195 +0200
-@@ -35,6 +35,7 @@
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index 4106ac64f95e..21f4a79bda0a 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -35,6 +35,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -1141,10 +1719,11 @@ diff -Nur linux-4.1.26.orig/arch/arm64/kernel/asm-offsets.c linux-4.1.26/arch/ar
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -Nur linux-4.1.26.orig/arch/arm64/kernel/entry.S linux-4.1.26/arch/arm64/kernel/entry.S
---- linux-4.1.26.orig/arch/arm64/kernel/entry.S 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm64/kernel/entry.S 2016-06-19 15:30:58.591293647 +0200
-@@ -367,11 +367,16 @@
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 05012cdb555f..7abfb48bd163 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -380,11 +380,16 @@ el1_irq:
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
@@ -1164,7 +1743,7 @@ diff -Nur linux-4.1.26.orig/arch/arm64/kernel/entry.S linux-4.1.26/arch/arm64/ke
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
-@@ -385,6 +390,7 @@
+@@ -398,6 +403,7 @@ el1_preempt:
1: bl preempt_schedule_irq // irq en/disable is done inside
ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
@@ -1172,7 +1751,7 @@ diff -Nur linux-4.1.26.orig/arch/arm64/kernel/entry.S linux-4.1.26/arch/arm64/ke
ret x24
#endif
-@@ -622,6 +628,7 @@
+@@ -635,6 +641,7 @@ fast_work_pending:
str x0, [sp, #S_X0] // returned x0
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
@@ -1180,10 +1759,11 @@ diff -Nur linux-4.1.26.orig/arch/arm64/kernel/entry.S linux-4.1.26/arch/arm64/ke
/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
ldr x2, [sp, #S_PSTATE]
mov x0, sp // 'regs'
-diff -Nur linux-4.1.26.orig/arch/arm64/kernel/insn.c linux-4.1.26/arch/arm64/kernel/insn.c
---- linux-4.1.26.orig/arch/arm64/kernel/insn.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm64/kernel/insn.c 2016-06-19 15:30:58.595293802 +0200
-@@ -77,7 +77,7 @@
+diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
+index 924902083e47..30eb88e5b896 100644
+--- a/arch/arm64/kernel/insn.c
++++ b/arch/arm64/kernel/insn.c
+@@ -77,7 +77,7 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
}
}
@@ -1192,7 +1772,7 @@ diff -Nur linux-4.1.26.orig/arch/arm64/kernel/insn.c linux-4.1.26/arch/arm64/ker
static void __kprobes *patch_map(void *addr, int fixmap)
{
-@@ -124,13 +124,13 @@
+@@ -124,13 +124,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
unsigned long flags = 0;
int ret;
@@ -1208,10 +1788,11 @@ diff -Nur linux-4.1.26.orig/arch/arm64/kernel/insn.c linux-4.1.26/arch/arm64/ker
return ret;
}
-diff -Nur linux-4.1.26.orig/arch/arm64/kernel/perf_event.c linux-4.1.26/arch/arm64/kernel/perf_event.c
---- linux-4.1.26.orig/arch/arm64/kernel/perf_event.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm64/kernel/perf_event.c 2016-06-19 15:30:58.595293802 +0200
-@@ -488,7 +488,7 @@
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index b67b01cb5109..c4cb2596ede6 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -488,7 +488,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
}
err = request_irq(irq, armpmu->handle_irq,
@@ -1220,10 +1801,11 @@ diff -Nur linux-4.1.26.orig/arch/arm64/kernel/perf_event.c linux-4.1.26/arch/arm
"arm-pmu", armpmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
-diff -Nur linux-4.1.26.orig/arch/arm64/mm/fault.c linux-4.1.26/arch/arm64/mm/fault.c
---- linux-4.1.26.orig/arch/arm64/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/arm64/mm/fault.c 2016-06-19 15:30:58.595293802 +0200
-@@ -211,7 +211,7 @@
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index fa5efaa5c3ac..1fdbb3704698 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -211,7 +211,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
* If we're in an interrupt or have no user context, we must not take
* the fault.
*/
@@ -1232,10 +1814,11 @@ diff -Nur linux-4.1.26.orig/arch/arm64/mm/fault.c linux-4.1.26/arch/arm64/mm/fau
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.26.orig/arch/avr32/include/asm/uaccess.h linux-4.1.26/arch/avr32/include/asm/uaccess.h
---- linux-4.1.26.orig/arch/avr32/include/asm/uaccess.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/avr32/include/asm/uaccess.h 2016-06-19 15:30:58.595293802 +0200
-@@ -97,7 +97,8 @@
+diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
+index 20b52c40bcd2..b1ec1fa06463 100644
+--- a/arch/avr32/include/asm/uaccess.h
++++ b/arch/avr32/include/asm/uaccess.h
+@@ -106,7 +106,8 @@ static inline __kernel_size_t copy_from_user(void *to,
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -1245,7 +1828,7 @@ diff -Nur linux-4.1.26.orig/arch/avr32/include/asm/uaccess.h linux-4.1.26/arch/a
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-@@ -116,7 +117,8 @@
+@@ -125,7 +126,8 @@ static inline __kernel_size_t copy_from_user(void *to,
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -1255,7 +1838,7 @@ diff -Nur linux-4.1.26.orig/arch/avr32/include/asm/uaccess.h linux-4.1.26/arch/a
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -136,7 +138,8 @@
+@@ -145,7 +147,8 @@ static inline __kernel_size_t copy_from_user(void *to,
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -1265,7 +1848,7 @@ diff -Nur linux-4.1.26.orig/arch/avr32/include/asm/uaccess.h linux-4.1.26/arch/a
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-@@ -158,7 +161,8 @@
+@@ -167,7 +170,8 @@ static inline __kernel_size_t copy_from_user(void *to,
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -1275,9 +1858,10 @@ diff -Nur linux-4.1.26.orig/arch/avr32/include/asm/uaccess.h linux-4.1.26/arch/a
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.26.orig/arch/avr32/mm/fault.c linux-4.1.26/arch/avr32/mm/fault.c
---- linux-4.1.26.orig/arch/avr32/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/avr32/mm/fault.c 2016-06-19 15:30:58.595293802 +0200
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index d223a8b57c1e..c03533937a9f 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
@@ -14,11 +14,11 @@
#include <linux/pagemap.h>
#include <linux/kdebug.h>
@@ -1291,7 +1875,7 @@ diff -Nur linux-4.1.26.orig/arch/avr32/mm/fault.c linux-4.1.26/arch/avr32/mm/fau
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
-@@ -81,7 +81,7 @@
+@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
* If we're in an interrupt or have no user context, we must
* not take the fault...
*/
@@ -1300,9 +1884,10 @@ diff -Nur linux-4.1.26.orig/arch/avr32/mm/fault.c linux-4.1.26/arch/avr32/mm/fau
goto no_context;
local_irq_enable();
-diff -Nur linux-4.1.26.orig/arch/cris/mm/fault.c linux-4.1.26/arch/cris/mm/fault.c
---- linux-4.1.26.orig/arch/cris/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/cris/mm/fault.c 2016-06-19 15:30:58.595293802 +0200
+diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
+index 83f12f2ed9e3..3066d40a6db1 100644
+--- a/arch/cris/mm/fault.c
++++ b/arch/cris/mm/fault.c
@@ -8,7 +8,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -1312,7 +1897,7 @@ diff -Nur linux-4.1.26.orig/arch/cris/mm/fault.c linux-4.1.26/arch/cris/mm/fault
#include <arch/system.h>
extern int find_fixup_code(struct pt_regs *);
-@@ -109,11 +109,11 @@
+@@ -109,11 +109,11 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
info.si_code = SEGV_MAPERR;
/*
@@ -1326,9 +1911,10 @@ diff -Nur linux-4.1.26.orig/arch/cris/mm/fault.c linux-4.1.26/arch/cris/mm/fault
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.26.orig/arch/frv/mm/fault.c linux-4.1.26/arch/frv/mm/fault.c
---- linux-4.1.26.orig/arch/frv/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/frv/mm/fault.c 2016-06-19 15:30:58.595293802 +0200
+diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
+index ec4917ddf678..61d99767fe16 100644
+--- a/arch/frv/mm/fault.c
++++ b/arch/frv/mm/fault.c
@@ -19,9 +19,9 @@
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -1340,7 +1926,7 @@ diff -Nur linux-4.1.26.orig/arch/frv/mm/fault.c linux-4.1.26/arch/frv/mm/fault.c
#include <asm/gdb-stub.h>
/*****************************************************************************/
-@@ -78,7 +78,7 @@
+@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -1349,10 +1935,11 @@ diff -Nur linux-4.1.26.orig/arch/frv/mm/fault.c linux-4.1.26/arch/frv/mm/fault.c
goto no_context;
if (user_mode(__frame))
-diff -Nur linux-4.1.26.orig/arch/frv/mm/highmem.c linux-4.1.26/arch/frv/mm/highmem.c
---- linux-4.1.26.orig/arch/frv/mm/highmem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/frv/mm/highmem.c 2016-06-19 15:30:58.595293802 +0200
-@@ -42,6 +42,7 @@
+diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
+index bed9a9bd3c10..785344bbdc07 100644
+--- a/arch/frv/mm/highmem.c
++++ b/arch/frv/mm/highmem.c
+@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
unsigned long paddr;
int type;
@@ -1360,16 +1947,17 @@ diff -Nur linux-4.1.26.orig/arch/frv/mm/highmem.c linux-4.1.26/arch/frv/mm/highm
pagefault_disable();
type = kmap_atomic_idx_push();
paddr = page_to_phys(page);
-@@ -85,5 +86,6 @@
+@@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr)
}
kmap_atomic_idx_pop();
pagefault_enable();
+ preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.26.orig/arch/hexagon/include/asm/uaccess.h linux-4.1.26/arch/hexagon/include/asm/uaccess.h
---- linux-4.1.26.orig/arch/hexagon/include/asm/uaccess.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/hexagon/include/asm/uaccess.h 2016-06-19 15:30:58.595293802 +0200
+diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
+index 25fc9049db8a..f61cfb28e9f2 100644
+--- a/arch/hexagon/include/asm/uaccess.h
++++ b/arch/hexagon/include/asm/uaccess.h
@@ -36,7 +36,8 @@
* @addr: User space pointer to start of block to check
* @size: Size of block to check
@@ -1380,9 +1968,10 @@ diff -Nur linux-4.1.26.orig/arch/hexagon/include/asm/uaccess.h linux-4.1.26/arch
*
* Checks if a pointer to a block of memory in user space is valid.
*
-diff -Nur linux-4.1.26.orig/arch/ia64/mm/fault.c linux-4.1.26/arch/ia64/mm/fault.c
---- linux-4.1.26.orig/arch/ia64/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/ia64/mm/fault.c 2016-06-19 15:30:58.595293802 +0200
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index ba5ba7accd0d..70b40d1205a6 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
@@ -11,10 +11,10 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -1395,7 +1984,7 @@ diff -Nur linux-4.1.26.orig/arch/ia64/mm/fault.c linux-4.1.26/arch/ia64/mm/fault
extern int die(char *, struct pt_regs *, long);
-@@ -96,7 +96,7 @@
+@@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/*
* If we're in an interrupt or have no user context, we must not take the fault..
*/
@@ -1404,29 +1993,11 @@ diff -Nur linux-4.1.26.orig/arch/ia64/mm/fault.c linux-4.1.26/arch/ia64/mm/fault
goto no_context;
#ifdef CONFIG_VIRTUAL_MEM_MAP
-diff -Nur linux-4.1.26.orig/arch/Kconfig linux-4.1.26/arch/Kconfig
---- linux-4.1.26.orig/arch/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/Kconfig 2016-06-19 15:30:54.915151887 +0200
-@@ -6,6 +6,7 @@
- tristate "OProfile system profiling"
- depends on PROFILING
- depends on HAVE_OPROFILE
-+ depends on !PREEMPT_RT_FULL
- select RING_BUFFER
- select RING_BUFFER_ALLOW_SWAP
- help
-@@ -49,6 +50,7 @@
- config JUMP_LABEL
- bool "Optimize very unlikely/likely branches"
- depends on HAVE_ARCH_JUMP_LABEL
-+ depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
- help
- This option enables a transparent branch optimization that
- makes certain almost-always-true or almost-always-false branch
-diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m32r/include/asm/uaccess.h
---- linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/m32r/include/asm/uaccess.h 2016-06-19 15:30:58.595293802 +0200
-@@ -91,7 +91,8 @@
+diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
+index c66a38d0a895..6f8982157a75 100644
+--- a/arch/m32r/include/asm/uaccess.h
++++ b/arch/m32r/include/asm/uaccess.h
+@@ -91,7 +91,8 @@ static inline void set_fs(mm_segment_t s)
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
@@ -1436,7 +2007,7 @@ diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m3
*
* Checks if a pointer to a block of memory in user space is valid.
*
-@@ -155,7 +156,8 @@
+@@ -155,7 +156,8 @@ extern int fixup_exception(struct pt_regs *regs);
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -1446,7 +2017,7 @@ diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m3
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -175,7 +177,8 @@
+@@ -175,7 +177,8 @@ extern int fixup_exception(struct pt_regs *regs);
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -1456,7 +2027,7 @@ diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m3
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-@@ -194,7 +197,8 @@
+@@ -194,7 +197,8 @@ extern int fixup_exception(struct pt_regs *regs);
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -1466,7 +2037,7 @@ diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m3
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -274,7 +278,8 @@
+@@ -274,7 +278,8 @@ do { \
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -1476,7 +2047,7 @@ diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m3
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-@@ -568,7 +573,8 @@
+@@ -568,7 +573,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
@@ -1486,7 +2057,7 @@ diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m3
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
-@@ -588,7 +594,8 @@
+@@ -588,7 +594,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
@@ -1496,7 +2067,7 @@ diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m3
*
* Copy data from kernel space to user space.
*
-@@ -606,7 +613,8 @@
+@@ -606,7 +613,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
@@ -1506,7 +2077,7 @@ diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m3
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
-@@ -626,7 +634,8 @@
+@@ -626,7 +634,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
@@ -1516,7 +2087,7 @@ diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m3
*
* Copy data from user space to kernel space.
*
-@@ -677,7 +686,8 @@
+@@ -677,7 +686,8 @@ unsigned long clear_user(void __user *mem, unsigned long len);
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
@@ -1526,9 +2097,10 @@ diff -Nur linux-4.1.26.orig/arch/m32r/include/asm/uaccess.h linux-4.1.26/arch/m3
*
* Get the size of a NUL-terminated string in user space.
*
-diff -Nur linux-4.1.26.orig/arch/m32r/mm/fault.c linux-4.1.26/arch/m32r/mm/fault.c
---- linux-4.1.26.orig/arch/m32r/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/m32r/mm/fault.c 2016-06-19 15:30:58.595293802 +0200
+diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
+index e3d4d4890104..8f9875b7933d 100644
+--- a/arch/m32r/mm/fault.c
++++ b/arch/m32r/mm/fault.c
@@ -24,9 +24,9 @@
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/highmem.h>
@@ -1540,7 +2112,7 @@ diff -Nur linux-4.1.26.orig/arch/m32r/mm/fault.c linux-4.1.26/arch/m32r/mm/fault
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-@@ -111,10 +111,10 @@
+@@ -111,10 +111,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
mm = tsk->mm;
/*
@@ -1554,9 +2126,10 @@ diff -Nur linux-4.1.26.orig/arch/m32r/mm/fault.c linux-4.1.26/arch/m32r/mm/fault
goto bad_area_nosemaphore;
if (error_code & ACE_USERMODE)
-diff -Nur linux-4.1.26.orig/arch/m68k/mm/fault.c linux-4.1.26/arch/m68k/mm/fault.c
---- linux-4.1.26.orig/arch/m68k/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/m68k/mm/fault.c 2016-06-19 15:30:58.595293802 +0200
+diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
+index b2f04aee46ec..6a94cdd0c830 100644
+--- a/arch/m68k/mm/fault.c
++++ b/arch/m68k/mm/fault.c
@@ -10,10 +10,10 @@
#include <linux/ptrace.h>
#include <linux/interrupt.h>
@@ -1569,7 +2142,7 @@ diff -Nur linux-4.1.26.orig/arch/m68k/mm/fault.c linux-4.1.26/arch/m68k/mm/fault
#include <asm/pgalloc.h>
extern void die_if_kernel(char *, struct pt_regs *, long);
-@@ -81,7 +81,7 @@
+@@ -81,7 +81,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -1578,10 +2151,11 @@ diff -Nur linux-4.1.26.orig/arch/m68k/mm/fault.c linux-4.1.26/arch/m68k/mm/fault
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.26.orig/arch/metag/mm/fault.c linux-4.1.26/arch/metag/mm/fault.c
---- linux-4.1.26.orig/arch/metag/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/metag/mm/fault.c 2016-06-19 15:30:58.595293802 +0200
-@@ -105,7 +105,7 @@
+diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
+index 2de5dc695a87..f57edca63609 100644
+--- a/arch/metag/mm/fault.c
++++ b/arch/metag/mm/fault.c
+@@ -105,7 +105,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
mm = tsk->mm;
@@ -1590,10 +2164,11 @@ diff -Nur linux-4.1.26.orig/arch/metag/mm/fault.c linux-4.1.26/arch/metag/mm/fau
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.26.orig/arch/metag/mm/highmem.c linux-4.1.26/arch/metag/mm/highmem.c
---- linux-4.1.26.orig/arch/metag/mm/highmem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/metag/mm/highmem.c 2016-06-19 15:30:58.595293802 +0200
-@@ -43,7 +43,7 @@
+diff --git a/arch/metag/mm/highmem.c b/arch/metag/mm/highmem.c
+index d71f621a2c0b..807f1b1c4e65 100644
+--- a/arch/metag/mm/highmem.c
++++ b/arch/metag/mm/highmem.c
+@@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page)
unsigned long vaddr;
int type;
@@ -1602,7 +2177,7 @@ diff -Nur linux-4.1.26.orig/arch/metag/mm/highmem.c linux-4.1.26/arch/metag/mm/h
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
-@@ -82,6 +82,7 @@
+@@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr)
}
pagefault_enable();
@@ -1610,7 +2185,7 @@ diff -Nur linux-4.1.26.orig/arch/metag/mm/highmem.c linux-4.1.26/arch/metag/mm/h
}
EXPORT_SYMBOL(__kunmap_atomic);
-@@ -95,6 +96,7 @@
+@@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
unsigned long vaddr;
int type;
@@ -1618,10 +2193,11 @@ diff -Nur linux-4.1.26.orig/arch/metag/mm/highmem.c linux-4.1.26/arch/metag/mm/h
pagefault_disable();
type = kmap_atomic_idx_push();
-diff -Nur linux-4.1.26.orig/arch/microblaze/include/asm/uaccess.h linux-4.1.26/arch/microblaze/include/asm/uaccess.h
---- linux-4.1.26.orig/arch/microblaze/include/asm/uaccess.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/microblaze/include/asm/uaccess.h 2016-06-19 15:30:58.595293802 +0200
-@@ -178,7 +178,8 @@
+diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
+index 0c0a5cfbf79a..826676778094 100644
+--- a/arch/microblaze/include/asm/uaccess.h
++++ b/arch/microblaze/include/asm/uaccess.h
+@@ -178,7 +178,8 @@ extern long __user_bad(void);
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -1631,7 +2207,7 @@ diff -Nur linux-4.1.26.orig/arch/microblaze/include/asm/uaccess.h linux-4.1.26/a
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -290,7 +291,8 @@
+@@ -290,7 +291,8 @@ extern long __user_bad(void);
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -1641,10 +2217,11 @@ diff -Nur linux-4.1.26.orig/arch/microblaze/include/asm/uaccess.h linux-4.1.26/a
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.26.orig/arch/microblaze/mm/fault.c linux-4.1.26/arch/microblaze/mm/fault.c
---- linux-4.1.26.orig/arch/microblaze/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/microblaze/mm/fault.c 2016-06-19 15:30:58.595293802 +0200
-@@ -107,14 +107,14 @@
+diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
+index d46a5ebb7570..177dfc003643 100644
+--- a/arch/microblaze/mm/fault.c
++++ b/arch/microblaze/mm/fault.c
+@@ -107,14 +107,14 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
is_write = 0;
@@ -1663,10 +2240,11 @@ diff -Nur linux-4.1.26.orig/arch/microblaze/mm/fault.c linux-4.1.26/arch/microbl
pr_emerg("r15 = %lx MSR = %lx\n",
regs->r15, regs->msr);
die("Weird page fault", regs, SIGSEGV);
-diff -Nur linux-4.1.26.orig/arch/microblaze/mm/highmem.c linux-4.1.26/arch/microblaze/mm/highmem.c
---- linux-4.1.26.orig/arch/microblaze/mm/highmem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/microblaze/mm/highmem.c 2016-06-19 15:30:58.599293956 +0200
-@@ -37,7 +37,7 @@
+diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
+index 5a92576fad92..2fcc5a52d84d 100644
+--- a/arch/microblaze/mm/highmem.c
++++ b/arch/microblaze/mm/highmem.c
+@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
unsigned long vaddr;
int idx, type;
@@ -1675,7 +2253,7 @@ diff -Nur linux-4.1.26.orig/arch/microblaze/mm/highmem.c linux-4.1.26/arch/micro
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
-@@ -63,6 +63,7 @@
+@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr)
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable();
@@ -1683,17 +2261,31 @@ diff -Nur linux-4.1.26.orig/arch/microblaze/mm/highmem.c linux-4.1.26/arch/micro
return;
}
-@@ -84,5 +85,6 @@
+@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr)
#endif
kmap_atomic_idx_pop();
pagefault_enable();
+ preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mips/include/asm/uaccess.h
---- linux-4.1.26.orig/arch/mips/include/asm/uaccess.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/mips/include/asm/uaccess.h 2016-06-19 15:30:58.599293956 +0200
-@@ -103,7 +103,8 @@
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index c99e8a32bea4..7e6ab18c488a 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2367,7 +2367,7 @@ config CPU_R4400_WORKAROUNDS
+ #
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
+
+ config CPU_SUPPORTS_HIGHMEM
+ bool
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index bc2f5164ce51..6dc7f5130d49 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -104,7 +104,8 @@ extern u64 __ua_limit;
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
@@ -1703,7 +2295,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* Checks if a pointer to a block of memory in user space is valid.
*
-@@ -138,7 +139,8 @@
+@@ -139,7 +140,8 @@ extern u64 __ua_limit;
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -1713,7 +2305,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-@@ -157,7 +159,8 @@
+@@ -158,7 +160,8 @@ extern u64 __ua_limit;
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -1723,7 +2315,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -177,7 +180,8 @@
+@@ -178,7 +181,8 @@ extern u64 __ua_limit;
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -1733,7 +2325,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-@@ -199,7 +203,8 @@
+@@ -200,7 +204,8 @@ extern u64 __ua_limit;
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -1743,7 +2335,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -498,7 +503,8 @@
+@@ -499,7 +504,8 @@ extern void __put_user_unknown(void);
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -1753,7 +2345,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-@@ -517,7 +523,8 @@
+@@ -518,7 +524,8 @@ extern void __put_user_unknown(void);
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -1763,7 +2355,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -537,7 +544,8 @@
+@@ -538,7 +545,8 @@ extern void __put_user_unknown(void);
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -1773,7 +2365,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-@@ -559,7 +567,8 @@
+@@ -560,7 +568,8 @@ extern void __put_user_unknown(void);
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -1783,7 +2375,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -815,7 +824,8 @@
+@@ -816,7 +825,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
@@ -1793,7 +2385,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
-@@ -888,7 +898,8 @@
+@@ -889,7 +899,8 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
@@ -1803,7 +2395,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* Copy data from kernel space to user space.
*
-@@ -1075,7 +1086,8 @@
+@@ -1076,7 +1087,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
@@ -1813,7 +2405,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
-@@ -1107,7 +1119,8 @@
+@@ -1108,7 +1120,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
@@ -1823,7 +2415,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* Copy data from user space to kernel space.
*
-@@ -1329,7 +1342,8 @@
+@@ -1332,7 +1345,8 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
@@ -1833,7 +2425,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* Get the size of a NUL-terminated string in user space.
*
-@@ -1398,7 +1412,8 @@
+@@ -1401,7 +1415,8 @@ static inline long __strnlen_user(const char __user *s, long n)
* strnlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
@@ -1843,22 +2435,11 @@ diff -Nur linux-4.1.26.orig/arch/mips/include/asm/uaccess.h linux-4.1.26/arch/mi
*
* Get the size of a NUL-terminated string in user space.
*
-diff -Nur linux-4.1.26.orig/arch/mips/Kconfig linux-4.1.26/arch/mips/Kconfig
---- linux-4.1.26.orig/arch/mips/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/mips/Kconfig 2016-06-19 15:30:58.599293956 +0200
-@@ -2367,7 +2367,7 @@
- #
- config HIGHMEM
- bool "High Memory Support"
-- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
-+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
-
- config CPU_SUPPORTS_HIGHMEM
- bool
-diff -Nur linux-4.1.26.orig/arch/mips/kernel/signal-common.h linux-4.1.26/arch/mips/kernel/signal-common.h
---- linux-4.1.26.orig/arch/mips/kernel/signal-common.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/mips/kernel/signal-common.h 2016-06-19 15:30:58.599293956 +0200
-@@ -28,12 +28,7 @@
+diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
+index 06805e09bcd3..0b85f827cd18 100644
+--- a/arch/mips/kernel/signal-common.h
++++ b/arch/mips/kernel/signal-common.h
+@@ -28,12 +28,7 @@ extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
extern int fpcsr_pending(unsigned int __user *fpcsr);
/* Make sure we will not lose FPU ownership */
@@ -1873,9 +2454,10 @@ diff -Nur linux-4.1.26.orig/arch/mips/kernel/signal-common.h linux-4.1.26/arch/m
+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
#endif /* __SIGNAL_COMMON_H */
-diff -Nur linux-4.1.26.orig/arch/mips/mm/fault.c linux-4.1.26/arch/mips/mm/fault.c
---- linux-4.1.26.orig/arch/mips/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/mips/mm/fault.c 2016-06-19 15:30:58.599293956 +0200
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index 7ff8637e530d..36c0f26fac6b 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
@@ -21,10 +21,10 @@
#include <linux/module.h>
#include <linux/kprobes.h>
@@ -1888,7 +2470,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/mm/fault.c linux-4.1.26/arch/mips/mm/fault
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
#include <linux/kdebug.h>
-@@ -94,7 +94,7 @@
+@@ -94,7 +94,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -1897,10 +2479,11 @@ diff -Nur linux-4.1.26.orig/arch/mips/mm/fault.c linux-4.1.26/arch/mips/mm/fault
goto bad_area_nosemaphore;
if (user_mode(regs))
-diff -Nur linux-4.1.26.orig/arch/mips/mm/highmem.c linux-4.1.26/arch/mips/mm/highmem.c
---- linux-4.1.26.orig/arch/mips/mm/highmem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/mips/mm/highmem.c 2016-06-19 15:30:58.599293956 +0200
-@@ -47,7 +47,7 @@
+diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
+index da815d295239..11661cbc11a8 100644
+--- a/arch/mips/mm/highmem.c
++++ b/arch/mips/mm/highmem.c
+@@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page)
unsigned long vaddr;
int idx, type;
@@ -1909,7 +2492,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/mm/highmem.c linux-4.1.26/arch/mips/mm/hig
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
-@@ -72,6 +72,7 @@
+@@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr)
if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable();
@@ -1917,7 +2500,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/mm/highmem.c linux-4.1.26/arch/mips/mm/hig
return;
}
-@@ -92,6 +93,7 @@
+@@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr)
#endif
kmap_atomic_idx_pop();
pagefault_enable();
@@ -1925,7 +2508,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/mm/highmem.c linux-4.1.26/arch/mips/mm/hig
}
EXPORT_SYMBOL(__kunmap_atomic);
-@@ -104,6 +106,7 @@
+@@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
unsigned long vaddr;
int idx, type;
@@ -1933,10 +2516,11 @@ diff -Nur linux-4.1.26.orig/arch/mips/mm/highmem.c linux-4.1.26/arch/mips/mm/hig
pagefault_disable();
type = kmap_atomic_idx_push();
-diff -Nur linux-4.1.26.orig/arch/mips/mm/init.c linux-4.1.26/arch/mips/mm/init.c
---- linux-4.1.26.orig/arch/mips/mm/init.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/mips/mm/init.c 2016-06-19 15:30:58.599293956 +0200
-@@ -90,6 +90,7 @@
+diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
+index faa5c9822ecc..198a3147dd7d 100644
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -90,6 +90,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
BUG_ON(Page_dcache_dirty(page));
@@ -1944,7 +2528,7 @@ diff -Nur linux-4.1.26.orig/arch/mips/mm/init.c linux-4.1.26/arch/mips/mm/init.c
pagefault_disable();
idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
idx += in_interrupt() ? FIX_N_COLOURS : 0;
-@@ -152,6 +153,7 @@
+@@ -152,6 +153,7 @@ void kunmap_coherent(void)
write_c0_entryhi(old_ctx);
local_irq_restore(flags);
pagefault_enable();
@@ -1952,10 +2536,11 @@ diff -Nur linux-4.1.26.orig/arch/mips/mm/init.c linux-4.1.26/arch/mips/mm/init.c
}
void copy_user_highpage(struct page *to, struct page *from,
-diff -Nur linux-4.1.26.orig/arch/mn10300/include/asm/highmem.h linux-4.1.26/arch/mn10300/include/asm/highmem.h
---- linux-4.1.26.orig/arch/mn10300/include/asm/highmem.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/mn10300/include/asm/highmem.h 2016-06-19 15:30:58.599293956 +0200
-@@ -75,6 +75,7 @@
+diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
+index 2fbbe4d920aa..1ddea5afba09 100644
+--- a/arch/mn10300/include/asm/highmem.h
++++ b/arch/mn10300/include/asm/highmem.h
+@@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct page *page)
unsigned long vaddr;
int idx, type;
@@ -1963,7 +2548,7 @@ diff -Nur linux-4.1.26.orig/arch/mn10300/include/asm/highmem.h linux-4.1.26/arch
pagefault_disable();
if (page < highmem_start_page)
return page_address(page);
-@@ -98,6 +99,7 @@
+@@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
if (vaddr < FIXADDR_START) { /* FIXME */
pagefault_enable();
@@ -1971,7 +2556,7 @@ diff -Nur linux-4.1.26.orig/arch/mn10300/include/asm/highmem.h linux-4.1.26/arch
return;
}
-@@ -122,6 +124,7 @@
+@@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
kmap_atomic_idx_pop();
pagefault_enable();
@@ -1979,9 +2564,10 @@ diff -Nur linux-4.1.26.orig/arch/mn10300/include/asm/highmem.h linux-4.1.26/arch
}
#endif /* __KERNEL__ */
-diff -Nur linux-4.1.26.orig/arch/mn10300/mm/fault.c linux-4.1.26/arch/mn10300/mm/fault.c
---- linux-4.1.26.orig/arch/mn10300/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/mn10300/mm/fault.c 2016-06-19 15:30:58.599293956 +0200
+diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
+index 0c2cc5d39c8e..4a1d181ed32f 100644
+--- a/arch/mn10300/mm/fault.c
++++ b/arch/mn10300/mm/fault.c
@@ -23,8 +23,8 @@
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -1992,7 +2578,7 @@ diff -Nur linux-4.1.26.orig/arch/mn10300/mm/fault.c linux-4.1.26/arch/mn10300/mm
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/cpu-regs.h>
-@@ -168,7 +168,7 @@
+@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -2001,10 +2587,11 @@ diff -Nur linux-4.1.26.orig/arch/mn10300/mm/fault.c linux-4.1.26/arch/mn10300/mm
goto no_context;
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
-diff -Nur linux-4.1.26.orig/arch/nios2/mm/fault.c linux-4.1.26/arch/nios2/mm/fault.c
---- linux-4.1.26.orig/arch/nios2/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/nios2/mm/fault.c 2016-06-19 15:30:58.599293956 +0200
-@@ -77,7 +77,7 @@
+diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
+index 0c9b6afe69e9..b51878b0c6b8 100644
+--- a/arch/nios2/mm/fault.c
++++ b/arch/nios2/mm/fault.c
+@@ -77,7 +77,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -2013,10 +2600,11 @@ diff -Nur linux-4.1.26.orig/arch/nios2/mm/fault.c linux-4.1.26/arch/nios2/mm/fau
goto bad_area_nosemaphore;
if (user_mode(regs))
-diff -Nur linux-4.1.26.orig/arch/parisc/include/asm/cacheflush.h linux-4.1.26/arch/parisc/include/asm/cacheflush.h
---- linux-4.1.26.orig/arch/parisc/include/asm/cacheflush.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/parisc/include/asm/cacheflush.h 2016-06-19 15:30:58.599293956 +0200
-@@ -142,6 +142,7 @@
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
+index de65f66ea64e..ec2df4bab302 100644
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -142,6 +142,7 @@ static inline void kunmap(struct page *page)
static inline void *kmap_atomic(struct page *page)
{
@@ -2024,7 +2612,7 @@ diff -Nur linux-4.1.26.orig/arch/parisc/include/asm/cacheflush.h linux-4.1.26/ar
pagefault_disable();
return page_address(page);
}
-@@ -150,6 +151,7 @@
+@@ -150,6 +151,7 @@ static inline void __kunmap_atomic(void *addr)
{
flush_kernel_dcache_page_addr(addr);
pagefault_enable();
@@ -2032,9 +2620,10 @@ diff -Nur linux-4.1.26.orig/arch/parisc/include/asm/cacheflush.h linux-4.1.26/ar
}
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-diff -Nur linux-4.1.26.orig/arch/parisc/kernel/traps.c linux-4.1.26/arch/parisc/kernel/traps.c
---- linux-4.1.26.orig/arch/parisc/kernel/traps.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/parisc/kernel/traps.c 2016-06-19 15:30:58.599293956 +0200
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index bbf22658d1a3..341966889a51 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
@@ -26,9 +26,9 @@
#include <linux/console.h>
#include <linux/bug.h>
@@ -2046,7 +2635,7 @@ diff -Nur linux-4.1.26.orig/arch/parisc/kernel/traps.c linux-4.1.26/arch/parisc/
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
-@@ -796,7 +796,7 @@
+@@ -796,7 +796,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
* unless pagefault_disable() was called before.
*/
@@ -2055,9 +2644,10 @@ diff -Nur linux-4.1.26.orig/arch/parisc/kernel/traps.c linux-4.1.26/arch/parisc/
{
/* Clean up and return if in exception table. */
if (fixup_exception(regs))
-diff -Nur linux-4.1.26.orig/arch/parisc/mm/fault.c linux-4.1.26/arch/parisc/mm/fault.c
---- linux-4.1.26.orig/arch/parisc/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/parisc/mm/fault.c 2016-06-19 15:30:58.599293956 +0200
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 50d64a7fc672..3bc9db1ad19a 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
@@ -15,8 +15,8 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
@@ -2068,7 +2658,7 @@ diff -Nur linux-4.1.26.orig/arch/parisc/mm/fault.c linux-4.1.26/arch/parisc/mm/f
#include <asm/traps.h>
/* Various important other fields */
-@@ -208,7 +208,7 @@
+@@ -208,7 +208,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
int fault;
unsigned int flags;
@@ -2077,10 +2667,45 @@ diff -Nur linux-4.1.26.orig/arch/parisc/mm/fault.c linux-4.1.26/arch/parisc/mm/f
goto no_context;
tsk = current;
-diff -Nur linux-4.1.26.orig/arch/powerpc/include/asm/kvm_host.h linux-4.1.26/arch/powerpc/include/asm/kvm_host.h
---- linux-4.1.26.orig/arch/powerpc/include/asm/kvm_host.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/include/asm/kvm_host.h 2016-06-19 15:30:58.599293956 +0200
-@@ -280,7 +280,7 @@
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 190cc48abc0c..7b70a5754e34 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -60,10 +60,11 @@ config LOCKDEP_SUPPORT
+
+ config RWSEM_GENERIC_SPINLOCK
+ bool
++ default y if PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
+
+ config GENERIC_LOCKBREAK
+ bool
+@@ -138,6 +139,7 @@ config PPC
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
++ select HAVE_PREEMPT_LAZY
+ select HAVE_MOD_ARCH_SPECIFIC
+ select MODULES_USE_ELF_RELA
+ select CLONE_BACKWARDS
+@@ -312,7 +314,7 @@ menu "Kernel options"
+
+ config HIGHMEM
+ bool "High memory support"
+- depends on PPC32
++ depends on PPC32 && !PREEMPT_RT_FULL
+
+ source kernel/Kconfig.hz
+ source kernel/Kconfig.preempt
+diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
+index a193a13cf08b..a1ddf4080e1a 100644
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -280,7 +280,7 @@ struct kvmppc_vcore {
u8 in_guest;
struct list_head runnable_threads;
spinlock_t lock;
@@ -2089,7 +2714,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/include/asm/kvm_host.h linux-4.1.26/arc
spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
u64 stolen_tb;
u64 preempt_tb;
-@@ -613,7 +613,7 @@
+@@ -613,7 +613,7 @@ struct kvm_vcpu_arch {
u8 prodded;
u32 last_inst;
@@ -2098,10 +2723,11 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/include/asm/kvm_host.h linux-4.1.26/arc
struct kvmppc_vcore *vcore;
int ret;
int trap;
-diff -Nur linux-4.1.26.orig/arch/powerpc/include/asm/thread_info.h linux-4.1.26/arch/powerpc/include/asm/thread_info.h
---- linux-4.1.26.orig/arch/powerpc/include/asm/thread_info.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/include/asm/thread_info.h 2016-06-19 15:30:58.603294110 +0200
-@@ -42,6 +42,8 @@
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 7efee4a3240b..40e6fa1b85b2 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -42,6 +42,8 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
@@ -2110,7 +2736,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/include/asm/thread_info.h linux-4.1.26/
unsigned long local_flags; /* private flags for thread */
/* low level flags - has atomic operations done on it */
-@@ -82,8 +84,7 @@
+@@ -82,8 +84,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
@@ -2120,7 +2746,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/include/asm/thread_info.h linux-4.1.26/
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
-@@ -101,6 +102,8 @@
+@@ -101,6 +102,8 @@ static inline struct thread_info *current_thread_info(void)
#if defined(CONFIG_PPC64)
#define TIF_ELF2ABI 18 /* function descriptors must die! */
#endif
@@ -2129,7 +2755,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/include/asm/thread_info.h linux-4.1.26/
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -119,14 +122,16 @@
+@@ -119,14 +122,16 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
@@ -2147,43 +2773,11 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/include/asm/thread_info.h linux-4.1.26/
/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
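The thread_info.h hunk above adds the two pieces of state that lazy preemption needs: a per-thread preempt_lazy_count and a TIF_NEED_RESCHED_LAZY flag, with _TIF_NEED_RESCHED_MASK covering both resched flags. How they combine is spelled out by the x86 preempt.h hunk later in this patch; condensed into a sketch here, with an illustrative helper name that is not from the tree:

/*
 * A task is due for lazy preemption only while no code on this thread
 * holds the lazy counter (illustrative helper, see x86 preempt.h hunk).
 */
static inline bool need_resched_lazy_sketch(void)
{
	if (current_thread_info()->preempt_lazy_count)
		return false;
	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
}

The asm-offsets.c hunk that follows exports an offset for the new counter so the entry assembly can test it without calling into C.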
-diff -Nur linux-4.1.26.orig/arch/powerpc/Kconfig linux-4.1.26/arch/powerpc/Kconfig
---- linux-4.1.26.orig/arch/powerpc/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/Kconfig 2016-06-19 15:30:58.599293956 +0200
-@@ -60,10 +60,11 @@
-
- config RWSEM_GENERIC_SPINLOCK
- bool
-+ default y if PREEMPT_RT_FULL
-
- config RWSEM_XCHGADD_ALGORITHM
- bool
-- default y
-+ default y if !PREEMPT_RT_FULL
-
- config GENERIC_LOCKBREAK
- bool
-@@ -138,6 +139,7 @@
- select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select GENERIC_STRNCPY_FROM_USER
- select GENERIC_STRNLEN_USER
-+ select HAVE_PREEMPT_LAZY
- select HAVE_MOD_ARCH_SPECIFIC
- select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS
-@@ -312,7 +314,7 @@
-
- config HIGHMEM
- bool "High memory support"
-- depends on PPC32
-+ depends on PPC32 && !PREEMPT_RT_FULL
-
- source kernel/Kconfig.hz
- source kernel/Kconfig.preempt
-diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/asm-offsets.c linux-4.1.26/arch/powerpc/kernel/asm-offsets.c
---- linux-4.1.26.orig/arch/powerpc/kernel/asm-offsets.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/kernel/asm-offsets.c 2016-06-19 15:30:58.603294110 +0200
-@@ -160,6 +160,7 @@
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 0034b6b3556a..65cc771661c4 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -160,6 +160,7 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -2191,10 +2785,11 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/asm-offsets.c linux-4.1.26/arch/
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/entry_32.S linux-4.1.26/arch/powerpc/kernel/entry_32.S
---- linux-4.1.26.orig/arch/powerpc/kernel/entry_32.S 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/kernel/entry_32.S 2016-06-19 15:30:58.603294110 +0200
-@@ -813,7 +813,14 @@
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 46fc0f4d8982..3d390ac490d9 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -813,7 +813,14 @@ resume_kernel:
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
@@ -2209,7 +2804,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/entry_32.S linux-4.1.26/arch/pow
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
-@@ -824,11 +831,11 @@
+@@ -824,11 +831,11 @@ resume_kernel:
*/
bl trace_hardirqs_off
#endif
@@ -2224,7 +2819,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/entry_32.S linux-4.1.26/arch/pow
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
-@@ -1149,7 +1156,7 @@
+@@ -1149,7 +1156,7 @@ global_dbcr0:
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -2233,7 +2828,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/entry_32.S linux-4.1.26/arch/pow
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1170,7 +1177,7 @@
+@@ -1170,7 +1177,7 @@ recheck:
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
@@ -2242,10 +2837,11 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/entry_32.S linux-4.1.26/arch/pow
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
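Most of the new resume_kernel instructions sit behind the fold markers above, so the following is a hedged reconstruction only; the -rt assembly is the authority. The kernel-exit decision the entry_32.S change aims for reads roughly:

/* Rough C rendering of the patched resume_kernel logic (illustration): */
if (flags & _TIF_NEED_RESCHED) {
	preempt_schedule_irq();		/* hard request: preempt as before */
} else if ((flags & _TIF_NEED_RESCHED_LAZY) &&
	   !current_thread_info()->preempt_lazy_count) {
	preempt_schedule_irq();		/* lazy request, not suppressed */
}
/* otherwise: restore registers and return */

The entry_64.S hunk that follows applies the same policy on 64-bit, testing _TIF_NEED_RESCHED_MASK and the TI_PREEMPT_LAZY counter.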
-diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/entry_64.S linux-4.1.26/arch/powerpc/kernel/entry_64.S
---- linux-4.1.26.orig/arch/powerpc/kernel/entry_64.S 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/kernel/entry_64.S 2016-06-19 15:30:58.603294110 +0200
-@@ -636,7 +636,7 @@
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index afbc20019c2e..5e2d2645d1e0 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -636,7 +636,7 @@ _GLOBAL(ret_from_except_lite)
#else
beq restore
#endif
@@ -2254,7 +2850,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/entry_64.S linux-4.1.26/arch/pow
beq 2f
bl restore_interrupts
SCHEDULE_USER
-@@ -698,10 +698,18 @@
+@@ -698,10 +698,18 @@ resume_kernel:
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
@@ -2274,7 +2870,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/entry_64.S linux-4.1.26/arch/pow
cmpwi cr1,r8,0
ld r0,SOFTE(r1)
cmpdi r0,0
-@@ -718,7 +726,7 @@
+@@ -718,7 +726,7 @@ resume_kernel:
/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
@@ -2283,10 +2879,11 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/entry_64.S linux-4.1.26/arch/pow
bne 1b
/*
-diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/irq.c linux-4.1.26/arch/powerpc/kernel/irq.c
---- linux-4.1.26.orig/arch/powerpc/kernel/irq.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/kernel/irq.c 2016-06-19 15:30:58.603294110 +0200
-@@ -614,6 +614,7 @@
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 45096033d37b..6a8e55a17683 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -614,6 +614,7 @@ void irq_ctx_init(void)
}
}
@@ -2294,7 +2891,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/irq.c linux-4.1.26/arch/powerpc/
void do_softirq_own_stack(void)
{
struct thread_info *curtp, *irqtp;
-@@ -631,6 +632,7 @@
+@@ -631,6 +632,7 @@ void do_softirq_own_stack(void)
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
}
@@ -2302,9 +2899,10 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/irq.c linux-4.1.26/arch/powerpc/
irq_hw_number_t virq_to_hw(unsigned int virq)
{
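The irq.c hunk above compiles do_softirq_own_stack() out under PREEMPT_RT_FULL, and the misc_32.S/misc_64.S hunks below put the same guard around the call_do_softirq assembly helpers; the sh and sparc irq code receives the identical treatment later in this patch. Condensed shape of the change, with the unchanged body elided:

#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq_own_stack(void)
{
	/* body unchanged: switch to the per-cpu softirq stack and run
	 * the pending softirqs there */
}
#endif /* CONFIG_PREEMPT_RT_FULL */

The likely rationale, hedged: with PREEMPT_RT_FULL softirqs run in thread context rather than on return from hard interrupts, so the stack-switching entry point has no caller and can be dropped.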
-diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/misc_32.S linux-4.1.26/arch/powerpc/kernel/misc_32.S
---- linux-4.1.26.orig/arch/powerpc/kernel/misc_32.S 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/kernel/misc_32.S 2016-06-19 15:30:58.603294110 +0200
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
+index 7c6bb4b17b49..e9dfe2270e93 100644
+--- a/arch/powerpc/kernel/misc_32.S
++++ b/arch/powerpc/kernel/misc_32.S
@@ -40,6 +40,7 @@
* We store the saved ksp_limit in the unused part
* of the STACK_FRAME_OVERHEAD
@@ -2313,7 +2911,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/misc_32.S linux-4.1.26/arch/powe
_GLOBAL(call_do_softirq)
mflr r0
stw r0,4(r1)
-@@ -56,6 +57,7 @@
+@@ -56,6 +57,7 @@ _GLOBAL(call_do_softirq)
stw r10,THREAD+KSP_LIMIT(r2)
mtlr r0
blr
@@ -2321,9 +2919,10 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/misc_32.S linux-4.1.26/arch/powe
/*
* void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
-diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/misc_64.S linux-4.1.26/arch/powerpc/kernel/misc_64.S
---- linux-4.1.26.orig/arch/powerpc/kernel/misc_64.S 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/kernel/misc_64.S 2016-06-19 15:30:58.603294110 +0200
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 4e314b90c75d..8a7238dd2f4b 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
@@ -29,6 +29,7 @@
.text
@@ -2332,7 +2931,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/misc_64.S linux-4.1.26/arch/powe
_GLOBAL(call_do_softirq)
mflr r0
std r0,16(r1)
-@@ -39,6 +40,7 @@
+@@ -39,6 +40,7 @@ _GLOBAL(call_do_softirq)
ld r0,16(r1)
mtlr r0
blr
@@ -2340,10 +2939,23 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kernel/misc_64.S linux-4.1.26/arch/powe
_GLOBAL(call_do_irq)
mflr r0
-diff -Nur linux-4.1.26.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.26/arch/powerpc/kvm/book3s_hv.c
---- linux-4.1.26.orig/arch/powerpc/kvm/book3s_hv.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/kvm/book3s_hv.c 2016-06-19 15:30:58.603294110 +0200
-@@ -115,11 +115,11 @@
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
+index 3caec2c42105..d4c48506ea1b 100644
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -172,6 +172,7 @@ config KVM_E500MC
+ config KVM_MPIC
+ bool "KVM in-kernel MPIC emulation"
+ depends on KVM && E500
++ depends on !PREEMPT_RT_FULL
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQFD
+ select HAVE_KVM_IRQ_ROUTING
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index f5b3de7f7fa2..c3f43a405b58 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -115,11 +115,11 @@ static bool kvmppc_ipi_thread(int cpu)
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
int cpu = vcpu->cpu;
@@ -2358,7 +2970,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.26/arch/power
++vcpu->stat.halt_wakeup;
}
-@@ -692,8 +692,8 @@
+@@ -692,8 +692,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
tvcpu->arch.prodded = 1;
smp_mb();
if (vcpu->arch.ceded) {
@@ -2369,7 +2981,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.26/arch/power
vcpu->stat.halt_wakeup++;
}
}
-@@ -1432,7 +1432,7 @@
+@@ -1432,7 +1432,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
INIT_LIST_HEAD(&vcore->runnable_threads);
spin_lock_init(&vcore->lock);
spin_lock_init(&vcore->stoltb_lock);
@@ -2378,7 +2990,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.26/arch/power
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
vcore->first_vcpuid = core * threads_per_subcore;
-@@ -2079,10 +2079,9 @@
+@@ -2079,10 +2079,9 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
struct kvm_vcpu *vcpu;
int do_sleep = 1;
@@ -2391,7 +3003,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.26/arch/power
/*
* Check one last time for pending exceptions and ceded state after
-@@ -2096,7 +2095,7 @@
+@@ -2096,7 +2095,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
}
if (!do_sleep) {
@@ -2400,7 +3012,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.26/arch/power
return;
}
-@@ -2104,7 +2103,7 @@
+@@ -2104,7 +2103,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
trace_kvmppc_vcore_blocked(vc, 0);
spin_unlock(&vc->lock);
schedule();
@@ -2409,7 +3021,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.26/arch/power
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
trace_kvmppc_vcore_blocked(vc, 1);
-@@ -2148,7 +2147,7 @@
+@@ -2148,7 +2147,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_start_thread(vcpu);
trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
@@ -2418,20 +3030,10 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.26/arch/power
}
}
-diff -Nur linux-4.1.26.orig/arch/powerpc/kvm/Kconfig linux-4.1.26/arch/powerpc/kvm/Kconfig
---- linux-4.1.26.orig/arch/powerpc/kvm/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/kvm/Kconfig 2016-06-19 15:30:58.603294110 +0200
-@@ -172,6 +172,7 @@
- config KVM_MPIC
- bool "KVM in-kernel MPIC emulation"
- depends on KVM && E500
-+ depends on !PREEMPT_RT_FULL
- select HAVE_KVM_IRQCHIP
- select HAVE_KVM_IRQFD
- select HAVE_KVM_IRQ_ROUTING
-diff -Nur linux-4.1.26.orig/arch/powerpc/mm/fault.c linux-4.1.26/arch/powerpc/mm/fault.c
---- linux-4.1.26.orig/arch/powerpc/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/mm/fault.c 2016-06-19 15:30:58.603294110 +0200
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index b396868d2aa7..6d535973b200 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
@@ -33,13 +33,13 @@
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
@@ -2447,7 +3049,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/mm/fault.c linux-4.1.26/arch/powerpc/mm
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
-@@ -272,15 +272,16 @@
+@@ -272,15 +272,16 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
@@ -2467,10 +3069,11 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/mm/fault.c linux-4.1.26/arch/powerpc/mm
printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
regs->nip, regs->msr);
die("Weird page fault", regs, SIGSEGV);
-diff -Nur linux-4.1.26.orig/arch/powerpc/mm/highmem.c linux-4.1.26/arch/powerpc/mm/highmem.c
---- linux-4.1.26.orig/arch/powerpc/mm/highmem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/mm/highmem.c 2016-06-19 15:30:58.603294110 +0200
-@@ -34,7 +34,7 @@
+diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
+index e7450bdbe83a..e292c8a60952 100644
+--- a/arch/powerpc/mm/highmem.c
++++ b/arch/powerpc/mm/highmem.c
+@@ -34,7 +34,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
unsigned long vaddr;
int idx, type;
@@ -2479,7 +3082,7 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/mm/highmem.c linux-4.1.26/arch/powerpc/
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
-@@ -59,6 +59,7 @@
+@@ -59,6 +59,7 @@ void __kunmap_atomic(void *kvaddr)
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable();
@@ -2487,17 +3090,18 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/mm/highmem.c linux-4.1.26/arch/powerpc/
return;
}
-@@ -82,5 +83,6 @@
+@@ -82,5 +83,6 @@ void __kunmap_atomic(void *kvaddr)
kmap_atomic_idx_pop();
pagefault_enable();
+ preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.26.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.1.26/arch/powerpc/platforms/ps3/device-init.c
---- linux-4.1.26.orig/arch/powerpc/platforms/ps3/device-init.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/powerpc/platforms/ps3/device-init.c 2016-06-19 15:30:58.603294110 +0200
-@@ -752,7 +752,7 @@
+diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
+index 3f175e8aedb4..c4c02f91904c 100644
+--- a/arch/powerpc/platforms/ps3/device-init.c
++++ b/arch/powerpc/platforms/ps3/device-init.c
+@@ -752,7 +752,7 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
}
pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
@@ -2506,10 +3110,11 @@ diff -Nur linux-4.1.26.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.1.2
dev->done.done || kthread_should_stop());
if (kthread_should_stop())
res = -EINTR;
-diff -Nur linux-4.1.26.orig/arch/s390/include/asm/kvm_host.h linux-4.1.26/arch/s390/include/asm/kvm_host.h
---- linux-4.1.26.orig/arch/s390/include/asm/kvm_host.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/s390/include/asm/kvm_host.h 2016-06-19 15:30:58.603294110 +0200
-@@ -419,7 +419,7 @@
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index d01fc588b5c3..905007eead88 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -419,7 +419,7 @@ struct kvm_s390_irq_payload {
struct kvm_s390_local_interrupt {
spinlock_t lock;
struct kvm_s390_float_interrupt *float_int;
@@ -2518,10 +3123,11 @@ diff -Nur linux-4.1.26.orig/arch/s390/include/asm/kvm_host.h linux-4.1.26/arch/s
atomic_t *cpuflags;
DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
struct kvm_s390_irq_payload irq;
-diff -Nur linux-4.1.26.orig/arch/s390/include/asm/uaccess.h linux-4.1.26/arch/s390/include/asm/uaccess.h
---- linux-4.1.26.orig/arch/s390/include/asm/uaccess.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/s390/include/asm/uaccess.h 2016-06-19 15:30:58.603294110 +0200
-@@ -98,7 +98,8 @@
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index f6ac1d7e7ed8..5c7381c5ad7f 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -98,7 +98,8 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
@@ -2531,7 +3137,7 @@ diff -Nur linux-4.1.26.orig/arch/s390/include/asm/uaccess.h linux-4.1.26/arch/s3
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
-@@ -118,7 +119,8 @@
+@@ -118,7 +119,8 @@ unsigned long __must_check __copy_from_user(void *to, const void __user *from,
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
@@ -2541,7 +3147,7 @@ diff -Nur linux-4.1.26.orig/arch/s390/include/asm/uaccess.h linux-4.1.26/arch/s3
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
-@@ -264,7 +266,8 @@
+@@ -264,7 +266,8 @@ int __get_user_bad(void) __attribute__((noreturn));
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
@@ -2551,7 +3157,7 @@ diff -Nur linux-4.1.26.orig/arch/s390/include/asm/uaccess.h linux-4.1.26/arch/s3
*
* Copy data from kernel space to user space.
*
-@@ -290,7 +293,8 @@
+@@ -290,7 +293,8 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
@@ -2561,7 +3167,7 @@ diff -Nur linux-4.1.26.orig/arch/s390/include/asm/uaccess.h linux-4.1.26/arch/s3
*
* Copy data from user space to kernel space.
*
-@@ -348,7 +352,8 @@
+@@ -348,7 +352,8 @@ static inline unsigned long strnlen_user(const char __user *src, unsigned long n
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
@@ -2571,10 +3177,11 @@ diff -Nur linux-4.1.26.orig/arch/s390/include/asm/uaccess.h linux-4.1.26/arch/s3
*
* Get the size of a NUL-terminated string in user space.
*
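The s390 docstring updates above, like the score, tile and x86 uaccess hunks below, all encode one rule: these accessors may sleep only if pagefaults are enabled. The non-sleeping counterpart they are being distinguished from follows this pattern; a sketch assuming the generic *_inatomic primitives, with an illustrative function name:

/*
 * Copy from user space without sleeping (sketch). With pagefaults
 * disabled the copy cannot fault pages in; it reports bytes left.
 */
static int copy_user_nofault_sketch(void *dst, const void __user *usrc,
				    unsigned long len)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, usrc, len);
	pagefault_enable();
	if (left)		/* faulted: retry where sleeping is fine */
		left = copy_from_user(dst, usrc, len);
	return left ? -EFAULT : 0;
}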
-diff -Nur linux-4.1.26.orig/arch/s390/kvm/interrupt.c linux-4.1.26/arch/s390/kvm/interrupt.c
---- linux-4.1.26.orig/arch/s390/kvm/interrupt.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/s390/kvm/interrupt.c 2016-06-19 15:30:58.607294264 +0200
-@@ -875,13 +875,13 @@
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 3dbba9a2bb0f..15016703b4bf 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -875,13 +875,13 @@ no_timer:
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
@@ -2590,7 +3197,7 @@ diff -Nur linux-4.1.26.orig/arch/s390/kvm/interrupt.c linux-4.1.26/arch/s390/kvm
vcpu->stat.halt_wakeup++;
}
}
-@@ -987,7 +987,7 @@
+@@ -987,7 +987,7 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
spin_lock(&li->lock);
irq.u.pgm.code = code;
__inject_prog(vcpu, &irq);
@@ -2599,7 +3206,7 @@ diff -Nur linux-4.1.26.orig/arch/s390/kvm/interrupt.c linux-4.1.26/arch/s390/kvm
spin_unlock(&li->lock);
return 0;
}
-@@ -1006,7 +1006,7 @@
+@@ -1006,7 +1006,7 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
spin_lock(&li->lock);
irq.u.pgm = *pgm_info;
rc = __inject_prog(vcpu, &irq);
@@ -2608,10 +3215,11 @@ diff -Nur linux-4.1.26.orig/arch/s390/kvm/interrupt.c linux-4.1.26/arch/s390/kvm
spin_unlock(&li->lock);
return rc;
}
-diff -Nur linux-4.1.26.orig/arch/s390/mm/fault.c linux-4.1.26/arch/s390/mm/fault.c
---- linux-4.1.26.orig/arch/s390/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/s390/mm/fault.c 2016-06-19 15:30:58.607294264 +0200
-@@ -399,7 +399,7 @@
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index 76515bcea2f1..4c8f5d7f9c23 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -399,7 +399,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
@@ -2620,9 +3228,10 @@ diff -Nur linux-4.1.26.orig/arch/s390/mm/fault.c linux-4.1.26/arch/s390/mm/fault
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
-diff -Nur linux-4.1.26.orig/arch/score/include/asm/uaccess.h linux-4.1.26/arch/score/include/asm/uaccess.h
---- linux-4.1.26.orig/arch/score/include/asm/uaccess.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/score/include/asm/uaccess.h 2016-06-19 15:30:58.607294264 +0200
+diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h
+index 69326dfb894d..01aec8ccde83 100644
+--- a/arch/score/include/asm/uaccess.h
++++ b/arch/score/include/asm/uaccess.h
@@ -36,7 +36,8 @@
* @addr: User space pointer to start of block to check
* @size: Size of block to check
@@ -2673,9 +3282,10 @@ diff -Nur linux-4.1.26.orig/arch/score/include/asm/uaccess.h linux-4.1.26/arch/s
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.26.orig/arch/score/mm/fault.c linux-4.1.26/arch/score/mm/fault.c
---- linux-4.1.26.orig/arch/score/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/score/mm/fault.c 2016-06-19 15:30:58.607294264 +0200
+diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
+index 6860beb2a280..37a6c2e0e969 100644
+--- a/arch/score/mm/fault.c
++++ b/arch/score/mm/fault.c
@@ -34,6 +34,7 @@
#include <linux/string.h>
#include <linux/types.h>
@@ -2684,7 +3294,7 @@ diff -Nur linux-4.1.26.orig/arch/score/mm/fault.c linux-4.1.26/arch/score/mm/fau
/*
* This routine handles page faults. It determines the address,
-@@ -73,7 +74,7 @@
+@@ -73,7 +74,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -2693,10 +3303,11 @@ diff -Nur linux-4.1.26.orig/arch/score/mm/fault.c linux-4.1.26/arch/score/mm/fau
goto bad_area_nosemaphore;
if (user_mode(regs))
-diff -Nur linux-4.1.26.orig/arch/sh/kernel/irq.c linux-4.1.26/arch/sh/kernel/irq.c
---- linux-4.1.26.orig/arch/sh/kernel/irq.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/sh/kernel/irq.c 2016-06-19 15:30:58.607294264 +0200
-@@ -147,6 +147,7 @@
+diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
+index eb10ff84015c..6fe8089e63fa 100644
+--- a/arch/sh/kernel/irq.c
++++ b/arch/sh/kernel/irq.c
+@@ -147,6 +147,7 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL;
}
@@ -2704,7 +3315,7 @@ diff -Nur linux-4.1.26.orig/arch/sh/kernel/irq.c linux-4.1.26/arch/sh/kernel/irq
void do_softirq_own_stack(void)
{
struct thread_info *curctx;
-@@ -174,6 +175,7 @@
+@@ -174,6 +175,7 @@ void do_softirq_own_stack(void)
"r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
);
}
@@ -2712,9 +3323,10 @@ diff -Nur linux-4.1.26.orig/arch/sh/kernel/irq.c linux-4.1.26/arch/sh/kernel/irq
#else
static inline void handle_one_irq(unsigned int irq)
{
-diff -Nur linux-4.1.26.orig/arch/sh/mm/fault.c linux-4.1.26/arch/sh/mm/fault.c
---- linux-4.1.26.orig/arch/sh/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/sh/mm/fault.c 2016-06-19 15:30:58.607294264 +0200
+diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
+index a58fec9b55e0..79d8276377d1 100644
+--- a/arch/sh/mm/fault.c
++++ b/arch/sh/mm/fault.c
@@ -17,6 +17,7 @@
#include <linux/kprobes.h>
#include <linux/perf_event.h>
@@ -2723,7 +3335,7 @@ diff -Nur linux-4.1.26.orig/arch/sh/mm/fault.c linux-4.1.26/arch/sh/mm/fault.c
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-@@ -438,9 +439,9 @@
+@@ -438,9 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
/*
* If we're in an interrupt, have no user context or are running
@@ -2735,10 +3347,11 @@ diff -Nur linux-4.1.26.orig/arch/sh/mm/fault.c linux-4.1.26/arch/sh/mm/fault.c
bad_area_nosemaphore(regs, error_code, address);
return;
}
-diff -Nur linux-4.1.26.orig/arch/sparc/Kconfig linux-4.1.26/arch/sparc/Kconfig
---- linux-4.1.26.orig/arch/sparc/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/sparc/Kconfig 2016-06-19 15:30:58.607294264 +0200
-@@ -189,12 +189,10 @@
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index e49502acbab4..85cb0c621283 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -189,12 +189,10 @@ config NR_CPUS
source kernel/Kconfig.hz
config RWSEM_GENERIC_SPINLOCK
@@ -2753,10 +3366,11 @@ diff -Nur linux-4.1.26.orig/arch/sparc/Kconfig linux-4.1.26/arch/sparc/Kconfig
config GENERIC_HWEIGHT
bool
-diff -Nur linux-4.1.26.orig/arch/sparc/kernel/irq_64.c linux-4.1.26/arch/sparc/kernel/irq_64.c
---- linux-4.1.26.orig/arch/sparc/kernel/irq_64.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/sparc/kernel/irq_64.c 2016-06-19 15:30:58.607294264 +0200
-@@ -849,6 +849,7 @@
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
+index 4033c23bdfa6..763cd88b4e92 100644
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -849,6 +849,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs);
}
@@ -2764,7 +3378,7 @@ diff -Nur linux-4.1.26.orig/arch/sparc/kernel/irq_64.c linux-4.1.26/arch/sparc/k
void do_softirq_own_stack(void)
{
void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-@@ -863,6 +864,7 @@
+@@ -863,6 +864,7 @@ void do_softirq_own_stack(void)
__asm__ __volatile__("mov %0, %%sp"
: : "r" (orig_sp));
}
@@ -2772,9 +3386,10 @@ diff -Nur linux-4.1.26.orig/arch/sparc/kernel/irq_64.c linux-4.1.26/arch/sparc/k
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
-diff -Nur linux-4.1.26.orig/arch/sparc/mm/fault_32.c linux-4.1.26/arch/sparc/mm/fault_32.c
---- linux-4.1.26.orig/arch/sparc/mm/fault_32.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/sparc/mm/fault_32.c 2016-06-19 15:30:58.607294264 +0200
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index 70d817154fe8..c399e7b3b035 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
@@ -21,6 +21,7 @@
#include <linux/perf_event.h>
#include <linux/interrupt.h>
@@ -2791,7 +3406,7 @@ diff -Nur linux-4.1.26.orig/arch/sparc/mm/fault_32.c linux-4.1.26/arch/sparc/mm/
#include "mm_32.h"
-@@ -196,7 +196,7 @@
+@@ -196,7 +196,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -2800,9 +3415,10 @@ diff -Nur linux-4.1.26.orig/arch/sparc/mm/fault_32.c linux-4.1.26/arch/sparc/mm/
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-diff -Nur linux-4.1.26.orig/arch/sparc/mm/fault_64.c linux-4.1.26/arch/sparc/mm/fault_64.c
---- linux-4.1.26.orig/arch/sparc/mm/fault_64.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/sparc/mm/fault_64.c 2016-06-19 15:30:58.607294264 +0200
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 479823249429..e9268ea1a68d 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
@@ -22,12 +22,12 @@
#include <linux/kdebug.h>
#include <linux/percpu.h>
@@ -2817,7 +3433,7 @@ diff -Nur linux-4.1.26.orig/arch/sparc/mm/fault_64.c linux-4.1.26/arch/sparc/mm/
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
-@@ -330,7 +330,7 @@
+@@ -330,7 +330,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -2826,10 +3442,11 @@ diff -Nur linux-4.1.26.orig/arch/sparc/mm/fault_64.c linux-4.1.26/arch/sparc/mm/
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-diff -Nur linux-4.1.26.orig/arch/sparc/mm/highmem.c linux-4.1.26/arch/sparc/mm/highmem.c
---- linux-4.1.26.orig/arch/sparc/mm/highmem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/sparc/mm/highmem.c 2016-06-19 15:30:58.607294264 +0200
-@@ -53,7 +53,7 @@
+diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
+index 449f864f0cef..a454ec5ff07a 100644
+--- a/arch/sparc/mm/highmem.c
++++ b/arch/sparc/mm/highmem.c
+@@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page)
unsigned long vaddr;
long idx, type;
@@ -2838,7 +3455,7 @@ diff -Nur linux-4.1.26.orig/arch/sparc/mm/highmem.c linux-4.1.26/arch/sparc/mm/h
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
-@@ -91,6 +91,7 @@
+@@ -91,6 +91,7 @@ void __kunmap_atomic(void *kvaddr)
if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable();
@@ -2846,17 +3463,18 @@ diff -Nur linux-4.1.26.orig/arch/sparc/mm/highmem.c linux-4.1.26/arch/sparc/mm/h
return;
}
-@@ -126,5 +127,6 @@
+@@ -126,5 +127,6 @@ void __kunmap_atomic(void *kvaddr)
kmap_atomic_idx_pop();
pagefault_enable();
+ preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.26.orig/arch/sparc/mm/init_64.c linux-4.1.26/arch/sparc/mm/init_64.c
---- linux-4.1.26.orig/arch/sparc/mm/init_64.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/sparc/mm/init_64.c 2016-06-19 15:30:58.607294264 +0200
-@@ -2738,7 +2738,7 @@
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 1d71181dcc04..1a55c8481272 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -2738,7 +2738,7 @@ void hugetlb_setup(struct pt_regs *regs)
struct mm_struct *mm = current->mm;
struct tsb_config *tp;
@@ -2865,10 +3483,11 @@ diff -Nur linux-4.1.26.orig/arch/sparc/mm/init_64.c linux-4.1.26/arch/sparc/mm/i
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
-diff -Nur linux-4.1.26.orig/arch/tile/include/asm/uaccess.h linux-4.1.26/arch/tile/include/asm/uaccess.h
---- linux-4.1.26.orig/arch/tile/include/asm/uaccess.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/tile/include/asm/uaccess.h 2016-06-19 15:30:58.611294419 +0200
-@@ -78,7 +78,8 @@
+diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
+index f41cb53cf645..a33276bf5ca1 100644
+--- a/arch/tile/include/asm/uaccess.h
++++ b/arch/tile/include/asm/uaccess.h
+@@ -78,7 +78,8 @@ int __range_ok(unsigned long addr, unsigned long size);
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
@@ -2878,7 +3497,7 @@ diff -Nur linux-4.1.26.orig/arch/tile/include/asm/uaccess.h linux-4.1.26/arch/ti
*
* Checks if a pointer to a block of memory in user space is valid.
*
-@@ -192,7 +193,8 @@
+@@ -192,7 +193,8 @@ extern int __get_user_bad(void)
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -2888,7 +3507,7 @@ diff -Nur linux-4.1.26.orig/arch/tile/include/asm/uaccess.h linux-4.1.26/arch/ti
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -274,7 +276,8 @@
+@@ -274,7 +276,8 @@ extern int __put_user_bad(void)
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -2898,7 +3517,7 @@ diff -Nur linux-4.1.26.orig/arch/tile/include/asm/uaccess.h linux-4.1.26/arch/ti
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-@@ -330,7 +333,8 @@
+@@ -330,7 +333,8 @@ extern int __put_user_bad(void)
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
@@ -2908,7 +3527,7 @@ diff -Nur linux-4.1.26.orig/arch/tile/include/asm/uaccess.h linux-4.1.26/arch/ti
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
-@@ -366,7 +370,8 @@
+@@ -366,7 +370,8 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
@@ -2918,7 +3537,7 @@ diff -Nur linux-4.1.26.orig/arch/tile/include/asm/uaccess.h linux-4.1.26/arch/ti
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
-@@ -437,7 +442,8 @@
+@@ -437,7 +442,8 @@ static inline unsigned long __must_check copy_from_user(void *to,
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
@@ -2928,10 +3547,11 @@ diff -Nur linux-4.1.26.orig/arch/tile/include/asm/uaccess.h linux-4.1.26/arch/ti
*
* Copy data from user space to user space. Caller must check
* the specified blocks with access_ok() before calling this function.
-diff -Nur linux-4.1.26.orig/arch/tile/mm/fault.c linux-4.1.26/arch/tile/mm/fault.c
---- linux-4.1.26.orig/arch/tile/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/tile/mm/fault.c 2016-06-19 15:30:58.611294419 +0200
-@@ -354,9 +354,9 @@
+diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
+index e83cc999da02..3f4f58d34a92 100644
+--- a/arch/tile/mm/fault.c
++++ b/arch/tile/mm/fault.c
+@@ -354,9 +354,9 @@ static int handle_page_fault(struct pt_regs *regs,
/*
* If we're in an interrupt, have no user context or are running in an
@@ -2943,10 +3563,11 @@ diff -Nur linux-4.1.26.orig/arch/tile/mm/fault.c linux-4.1.26/arch/tile/mm/fault
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
-diff -Nur linux-4.1.26.orig/arch/tile/mm/highmem.c linux-4.1.26/arch/tile/mm/highmem.c
---- linux-4.1.26.orig/arch/tile/mm/highmem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/tile/mm/highmem.c 2016-06-19 15:30:58.611294419 +0200
-@@ -201,7 +201,7 @@
+diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
+index 6aa2f2625447..fcd545014e79 100644
+--- a/arch/tile/mm/highmem.c
++++ b/arch/tile/mm/highmem.c
+@@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
int idx, type;
pte_t *pte;
@@ -2955,7 +3576,7 @@ diff -Nur linux-4.1.26.orig/arch/tile/mm/highmem.c linux-4.1.26/arch/tile/mm/hig
pagefault_disable();
/* Avoid icache flushes by disallowing atomic executable mappings. */
-@@ -259,6 +259,7 @@
+@@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr)
}
pagefault_enable();
@@ -2963,10 +3584,11 @@ diff -Nur linux-4.1.26.orig/arch/tile/mm/highmem.c linux-4.1.26/arch/tile/mm/hig
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.26.orig/arch/um/kernel/trap.c linux-4.1.26/arch/um/kernel/trap.c
---- linux-4.1.26.orig/arch/um/kernel/trap.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/um/kernel/trap.c 2016-06-19 15:30:58.611294419 +0200
-@@ -35,10 +35,10 @@
+diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
+index 8e4daf44e980..f9c9e5a6beba 100644
+--- a/arch/um/kernel/trap.c
++++ b/arch/um/kernel/trap.c
+@@ -35,10 +35,10 @@ int handle_page_fault(unsigned long address, unsigned long ip,
*code_out = SEGV_MAPERR;
/*
@@ -2979,10 +3601,11 @@ diff -Nur linux-4.1.26.orig/arch/um/kernel/trap.c linux-4.1.26/arch/um/kernel/tr
goto out_nosemaphore;
if (is_user)
-diff -Nur linux-4.1.26.orig/arch/unicore32/mm/fault.c linux-4.1.26/arch/unicore32/mm/fault.c
---- linux-4.1.26.orig/arch/unicore32/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/unicore32/mm/fault.c 2016-06-19 15:30:58.611294419 +0200
-@@ -218,7 +218,7 @@
+diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
+index 0dc922dba915..afccef5529cc 100644
+--- a/arch/unicore32/mm/fault.c
++++ b/arch/unicore32/mm/fault.c
+@@ -218,7 +218,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -2991,10 +3614,45 @@ diff -Nur linux-4.1.26.orig/arch/unicore32/mm/fault.c linux-4.1.26/arch/unicore3
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.26.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.1.26/arch/x86/crypto/aesni-intel_glue.c
---- linux-4.1.26.orig/arch/x86/crypto/aesni-intel_glue.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/crypto/aesni-intel_glue.c 2016-06-19 15:30:58.611294419 +0200
-@@ -382,14 +382,14 @@
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 226d5696e1d1..aac357a4cd5c 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -22,6 +22,7 @@ config X86_64
+ ### Arch settings
+ config X86
+ def_bool y
++ select HAVE_PREEMPT_LAZY
+ select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+ select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
+ select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+@@ -203,8 +204,11 @@ config ARCH_MAY_HAVE_PC_FDC
+ def_bool y
+ depends on ISA_DMA_API
+
++config RWSEM_GENERIC_SPINLOCK
++ def_bool PREEMPT_RT_FULL
++
+ config RWSEM_XCHGADD_ALGORITHM
+- def_bool y
++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+
+ config GENERIC_CALIBRATE_DELAY
+ def_bool y
+@@ -838,7 +842,7 @@ config IOMMU_HELPER
+ config MAXSMP
+ bool "Enable Maximum number of SMP Processors and NUMA Nodes"
+ depends on X86_64 && SMP && DEBUG_KERNEL
+- select CPUMASK_OFFSTACK
++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
+ ---help---
+ Enable maximum number of CPUS and NUMA Nodes for this architecture.
+ If unsure, say N.
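The x86 Kconfig hunk mirrors the powerpc one earlier in the patch: under PREEMPT_RT_FULL the xchg-add rwsem fast path gives way to the generic spinlock implementation, and MAXSMP stops selecting CPUMASK_OFFSTACK. As a hedged orientation, what CPUMASK_OFFSTACK toggles in the generic headers is the representation of cpumask_var_t, and the off-stack variant requires allocation, which is presumably what the -rt tree wants to avoid here:

/* What CPUMASK_OFFSTACK selects (sketch of the linux/cpumask.h split): */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;		/* heap-allocated mask */
#else
typedef struct cpumask cpumask_var_t[1];	/* lives on the stack */
#endif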
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index 112cefacf2af..3fd3b16349ae 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -382,14 +382,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -3012,7 +3670,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.1.26/arch
return err;
}
-@@ -406,14 +406,14 @@
+@@ -406,14 +406,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -3029,7 +3687,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.1.26/arch
return err;
}
-@@ -430,14 +430,14 @@
+@@ -430,14 +430,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -3046,7 +3704,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.1.26/arch
return err;
}
-@@ -454,14 +454,14 @@
+@@ -454,14 +454,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -3063,7 +3721,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.1.26/arch
return err;
}
-@@ -513,18 +513,20 @@
+@@ -513,18 +513,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -3086,10 +3744,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.1.26/arch
return err;
}
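The inner lines of the aesni hunks are folded away above, but each one applies the same transformation: kernel_fpu_begin()/kernel_fpu_end() move from around the whole blkcipher walk to around each chunk, since FPU sections run with preemption disabled and must stay short on PREEMPT_RT. A hedged condensation, modelled on the ecb_encrypt() case:

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();	/* was: once, before the loop */
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();	/* was: once, after the loop */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

The cast5 and glue_helper hunks below restructure their fpu_begin/fpu_end bracketing the same way, ending the FPU section before each blkcipher_walk_done().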
-diff -Nur linux-4.1.26.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.26/arch/x86/crypto/cast5_avx_glue.c
---- linux-4.1.26.orig/arch/x86/crypto/cast5_avx_glue.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/crypto/cast5_avx_glue.c 2016-06-19 15:30:58.611294419 +0200
-@@ -60,7 +60,7 @@
+diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
+index 236c80974457..f799ec36bfa7 100644
+--- a/arch/x86/crypto/cast5_avx_glue.c
++++ b/arch/x86/crypto/cast5_avx_glue.c
+@@ -60,7 +60,7 @@ static inline void cast5_fpu_end(bool fpu_enabled)
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
bool enc)
{
@@ -3098,7 +3757,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.26/arch/x
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes;
-@@ -76,7 +76,7 @@
+@@ -76,7 +76,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
@@ -3107,7 +3766,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.26/arch/x
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
-@@ -104,10 +104,9 @@
+@@ -104,10 +104,9 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
} while (nbytes >= bsize);
done:
@@ -3119,7 +3778,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.26/arch/x
return err;
}
-@@ -228,7 +227,7 @@
+@@ -228,7 +227,7 @@ done:
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
@@ -3128,7 +3787,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.26/arch/x
struct blkcipher_walk walk;
int err;
-@@ -237,12 +236,11 @@
+@@ -237,12 +236,11 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes)) {
@@ -3143,7 +3802,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.26/arch/x
return err;
}
-@@ -312,7 +310,7 @@
+@@ -312,7 +310,7 @@ done:
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
@@ -3152,7 +3811,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.26/arch/x
struct blkcipher_walk walk;
int err;
-@@ -321,13 +319,12 @@
+@@ -321,13 +319,12 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
@@ -3168,10 +3827,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.26/arch/x
if (walk.nbytes) {
ctr_crypt_final(desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
-diff -Nur linux-4.1.26.orig/arch/x86/crypto/glue_helper.c linux-4.1.26/arch/x86/crypto/glue_helper.c
---- linux-4.1.26.orig/arch/x86/crypto/glue_helper.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/crypto/glue_helper.c 2016-06-19 15:30:58.611294419 +0200
-@@ -39,7 +39,7 @@
+diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
+index 6a85598931b5..3a506ce7ed93 100644
+--- a/arch/x86/crypto/glue_helper.c
++++ b/arch/x86/crypto/glue_helper.c
+@@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
void *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = 128 / 8;
unsigned int nbytes, i, func_bytes;
@@ -3180,7 +3840,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/glue_helper.c linux-4.1.26/arch/x86/
int err;
err = blkcipher_walk_virt(desc, walk);
-@@ -49,7 +49,7 @@
+@@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
u8 *wdst = walk->dst.virt.addr;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -3189,7 +3849,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/glue_helper.c linux-4.1.26/arch/x86/
for (i = 0; i < gctx->num_funcs; i++) {
func_bytes = bsize * gctx->funcs[i].num_blocks;
-@@ -71,10 +71,10 @@
+@@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
}
done:
@@ -3201,7 +3861,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/glue_helper.c linux-4.1.26/arch/x86/
return err;
}
-@@ -194,7 +194,7 @@
+@@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
@@ -3210,7 +3870,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/glue_helper.c linux-4.1.26/arch/x86/
struct blkcipher_walk walk;
int err;
-@@ -203,12 +203,12 @@
+@@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
while ((nbytes = walk.nbytes)) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -3225,7 +3885,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/glue_helper.c linux-4.1.26/arch/x86/
return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
-@@ -277,7 +277,7 @@
+@@ -277,7 +277,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
@@ -3234,7 +3894,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/glue_helper.c linux-4.1.26/arch/x86/
struct blkcipher_walk walk;
int err;
-@@ -286,13 +286,12 @@
+@@ -286,13 +286,12 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
while ((nbytes = walk.nbytes) >= bsize) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -3250,7 +3910,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/glue_helper.c linux-4.1.26/arch/x86/
if (walk.nbytes) {
glue_ctr_crypt_final_128bit(
gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
-@@ -347,7 +346,7 @@
+@@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
void *tweak_ctx, void *crypt_ctx)
{
const unsigned int bsize = 128 / 8;
@@ -3259,7 +3919,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/glue_helper.c linux-4.1.26/arch/x86/
struct blkcipher_walk walk;
int err;
-@@ -360,21 +359,21 @@
+@@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
/* set minimum length to bsize, for tweak_fn */
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -3286,10 +3946,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/crypto/glue_helper.c linux-4.1.26/arch/x86/
return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
-diff -Nur linux-4.1.26.orig/arch/x86/include/asm/preempt.h linux-4.1.26/arch/x86/include/asm/preempt.h
---- linux-4.1.26.orig/arch/x86/include/asm/preempt.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/include/asm/preempt.h 2016-06-19 15:30:58.611294419 +0200
-@@ -82,17 +82,33 @@
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index 67b6cd00a44f..eff1b8609f77 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -82,17 +82,46 @@ static __always_inline void __preempt_count_sub(int val)
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
*/
@@ -3304,6 +3965,8 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/preempt.h linux-4.1.26/arch/x86
+ if (____preempt_count_dec_and_test())
+ return true;
+#ifdef CONFIG_PREEMPT_LAZY
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+ return false;
@@ -3316,18 +3979,30 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/preempt.h linux-4.1.26/arch/x86
static __always_inline bool should_resched(int preempt_offset)
{
+#ifdef CONFIG_PREEMPT_LAZY
-+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset ||
-+ test_thread_flag(TIF_NEED_RESCHED_LAZY));
++ u32 tmp;
++
++ tmp = raw_cpu_read_4(__preempt_count);
++ if (tmp == preempt_offset)
++ return true;
++
++ /* preempt count == 0 ? */
++ tmp &= ~PREEMPT_NEED_RESCHED;
++ if (tmp)
++ return false;
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+#endif
}
#ifdef CONFIG_PREEMPT
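The should_resched() rewrite above leans on an x86 detail worth spelling out: the per-cpu preempt count carries the need-resched hint as an inverted PREEMPT_NEED_RESCHED bit, so a plain read equals preempt_offset only when the nesting count matches and a reschedule is pending. Masking the bit off isolates the real nesting count for the lazy path; an annotated fragment of the logic above, for illustration only:

	u32 tmp = raw_cpu_read_4(__preempt_count);
	if (tmp == preempt_offset)
		return true;		/* count matches, resched pending */
	tmp &= ~PREEMPT_NEED_RESCHED;	/* drop the folded hint */
	if (tmp)
		return false;		/* still inside preempt_disable() */
	/* count is zero: the lazy flag may be honoured, unless a lazy
	 * section is open (preempt_lazy_count != 0, checked next) */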
-diff -Nur linux-4.1.26.orig/arch/x86/include/asm/signal.h linux-4.1.26/arch/x86/include/asm/signal.h
---- linux-4.1.26.orig/arch/x86/include/asm/signal.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/include/asm/signal.h 2016-06-19 15:30:58.611294419 +0200
-@@ -23,6 +23,19 @@
+diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
+index 31eab867e6d3..0e7bfe98e1d1 100644
+--- a/arch/x86/include/asm/signal.h
++++ b/arch/x86/include/asm/signal.h
+@@ -23,6 +23,19 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
@@ -3347,9 +4022,10 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/signal.h linux-4.1.26/arch/x86/
#ifndef CONFIG_COMPAT
typedef sigset_t compat_sigset_t;
#endif
-diff -Nur linux-4.1.26.orig/arch/x86/include/asm/stackprotector.h linux-4.1.26/arch/x86/include/asm/stackprotector.h
---- linux-4.1.26.orig/arch/x86/include/asm/stackprotector.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/include/asm/stackprotector.h 2016-06-19 15:30:58.611294419 +0200
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 6a998598f172..64fb5cbe54fa 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
@@ -57,7 +57,7 @@
*/
static __always_inline void boot_init_stack_canary(void)
@@ -3359,7 +4035,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/stackprotector.h linux-4.1.26/a
u64 tsc;
#ifdef CONFIG_X86_64
-@@ -68,8 +68,16 @@
+@@ -68,8 +68,16 @@ static __always_inline void boot_init_stack_canary(void)
* of randomness. The TSC only matters for very early init,
* there it already has some randomness on most systems. Later
* on during the bootup the random pool has true entropy too.
@@ -3376,10 +4052,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/stackprotector.h linux-4.1.26/a
tsc = __native_read_tsc();
canary += tsc + (tsc << 32UL);
-diff -Nur linux-4.1.26.orig/arch/x86/include/asm/thread_info.h linux-4.1.26/arch/x86/include/asm/thread_info.h
---- linux-4.1.26.orig/arch/x86/include/asm/thread_info.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/include/asm/thread_info.h 2016-06-19 15:30:58.611294419 +0200
-@@ -55,6 +55,8 @@
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index b4bdec3e9523..606144afb990 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -55,6 +55,8 @@ struct thread_info {
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
int saved_preempt_count;
@@ -3388,7 +4065,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/thread_info.h linux-4.1.26/arch
mm_segment_t addr_limit;
void __user *sysenter_return;
unsigned int sig_on_uaccess_error:1;
-@@ -95,6 +97,7 @@
+@@ -95,6 +97,7 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
@@ -3396,7 +4073,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/thread_info.h linux-4.1.26/arch
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
-@@ -119,6 +122,7 @@
+@@ -119,6 +122,7 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -3404,7 +4081,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/thread_info.h linux-4.1.26/arch
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_NOTSC (1 << TIF_NOTSC)
-@@ -168,6 +172,8 @@
+@@ -168,6 +172,8 @@ struct thread_info {
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
@@ -3413,33 +4090,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/thread_info.h linux-4.1.26/arch
#define STACK_WARN (THREAD_SIZE/8)
/*
-diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uaccess_32.h linux-4.1.26/arch/x86/include/asm/uaccess_32.h
---- linux-4.1.26.orig/arch/x86/include/asm/uaccess_32.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/include/asm/uaccess_32.h 2016-06-19 15:30:58.615294573 +0200
-@@ -70,7 +70,8 @@
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -117,7 +118,8 @@
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
-diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uaccess.h linux-4.1.26/arch/x86/include/asm/uaccess.h
---- linux-4.1.26.orig/arch/x86/include/asm/uaccess.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/include/asm/uaccess.h 2016-06-19 15:30:58.611294419 +0200
-@@ -74,7 +74,8 @@
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index d081e7e42fb3..705e3d89d84d 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -74,7 +74,8 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
@@ -3449,7 +4104,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uaccess.h linux-4.1.26/arch/x86
*
* Checks if a pointer to a block of memory in user space is valid.
*
-@@ -145,7 +146,8 @@
+@@ -145,7 +146,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -3459,7 +4114,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uaccess.h linux-4.1.26/arch/x86
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -240,7 +242,8 @@
+@@ -240,7 +242,8 @@ extern void __put_user_8(void);
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -3469,7 +4124,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uaccess.h linux-4.1.26/arch/x86
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-@@ -455,7 +458,8 @@
+@@ -459,7 +462,8 @@ struct __large_struct { unsigned long buf[100]; };
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
@@ -3479,7 +4134,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uaccess.h linux-4.1.26/arch/x86
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-@@ -479,7 +483,8 @@
+@@ -483,7 +487,8 @@ struct __large_struct { unsigned long buf[100]; };
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
@@ -3489,10 +4144,35 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uaccess.h linux-4.1.26/arch/x86
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.1.26/arch/x86/include/asm/uv/uv_bau.h
---- linux-4.1.26.orig/arch/x86/include/asm/uv/uv_bau.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/include/asm/uv/uv_bau.h 2016-06-19 15:30:58.615294573 +0200
-@@ -615,9 +615,9 @@
+diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
+index 3c03a5de64d3..7c8ad3451988 100644
+--- a/arch/x86/include/asm/uaccess_32.h
++++ b/arch/x86/include/asm/uaccess_32.h
+@@ -70,7 +70,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+- * Context: User context only. This function may sleep.
++ * Context: User context only. This function may sleep if pagefaults are
++ * enabled.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+@@ -117,7 +118,8 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ * @from: Source address, in user space.
+ * @n: Number of bytes to copy.
+ *
+- * Context: User context only. This function may sleep.
++ * Context: User context only. This function may sleep if pagefaults are
++ * enabled.
+ *
+ * Copy data from user space to kernel space. Caller must check
+ * the specified block with access_ok() before calling this function.
+diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
+index fc808b83fccb..ebb40118abf5 100644
+--- a/arch/x86/include/asm/uv/uv_bau.h
++++ b/arch/x86/include/asm/uv/uv_bau.h
+@@ -615,9 +615,9 @@ struct bau_control {
cycles_t send_message;
cycles_t period_end;
cycles_t period_time;
@@ -3505,7 +4185,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.1.26/arch/x
/* tunables */
int max_concurr;
int max_concurr_const;
-@@ -776,15 +776,15 @@
+@@ -776,15 +776,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
* to be lowered below the current 'v'. atomic_add_unless can only stop
* on equal.
*/
@@ -3525,10 +4205,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.1.26/arch/x
return 1;
}
-diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uv/uv_hub.h linux-4.1.26/arch/x86/include/asm/uv/uv_hub.h
---- linux-4.1.26.orig/arch/x86/include/asm/uv/uv_hub.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/include/asm/uv/uv_hub.h 2016-06-19 15:30:58.615294573 +0200
-@@ -492,7 +492,7 @@
+diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
+index a00ad8f2a657..c2729abe02bc 100644
+--- a/arch/x86/include/asm/uv/uv_hub.h
++++ b/arch/x86/include/asm/uv/uv_hub.h
+@@ -492,7 +492,7 @@ struct uv_blade_info {
unsigned short nr_online_cpus;
unsigned short pnode;
short memory_nid;
@@ -3537,43 +4218,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/include/asm/uv/uv_hub.h linux-4.1.26/arch/x
unsigned long nmi_count; /* obsolete, see uv_hub_nmi */
};
extern struct uv_blade_info *uv_blade_info;
-diff -Nur linux-4.1.26.orig/arch/x86/Kconfig linux-4.1.26/arch/x86/Kconfig
---- linux-4.1.26.orig/arch/x86/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/Kconfig 2016-06-19 15:30:58.611294419 +0200
-@@ -22,6 +22,7 @@
- ### Arch settings
- config X86
- def_bool y
-+ select HAVE_PREEMPT_LAZY
- select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
- select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
- select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
-@@ -203,8 +204,11 @@
- def_bool y
- depends on ISA_DMA_API
-
-+config RWSEM_GENERIC_SPINLOCK
-+ def_bool PREEMPT_RT_FULL
-+
- config RWSEM_XCHGADD_ALGORITHM
-- def_bool y
-+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
-
- config GENERIC_CALIBRATE_DELAY
- def_bool y
-@@ -838,7 +842,7 @@
- config MAXSMP
- bool "Enable Maximum number of SMP Processors and NUMA Nodes"
- depends on X86_64 && SMP && DEBUG_KERNEL
-- select CPUMASK_OFFSTACK
-+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
- ---help---
- Enable maximum number of CPUS and NUMA Nodes for this architecture.
- If unsure, say N.
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/apic/io_apic.c linux-4.1.26/arch/x86/kernel/apic/io_apic.c
---- linux-4.1.26.orig/arch/x86/kernel/apic/io_apic.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/apic/io_apic.c 2016-06-19 15:30:58.615294573 +0200
-@@ -1891,7 +1891,8 @@
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index f4dc2462a1ac..07c6aba75aa0 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1891,7 +1891,8 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
/* If we are moving the irq we need to mask it */
@@ -3583,10 +4232,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/apic/io_apic.c linux-4.1.26/arch/x86
mask_ioapic(cfg);
return true;
}
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-4.1.26/arch/x86/kernel/apic/x2apic_uv_x.c
---- linux-4.1.26.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/apic/x2apic_uv_x.c 2016-06-19 15:30:58.615294573 +0200
-@@ -949,7 +949,7 @@
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index c8d92950bc04..3d2fbca33b73 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -949,7 +949,7 @@ void __init uv_system_init(void)
uv_blade_info[blade].pnode = pnode;
uv_blade_info[blade].nr_possible_cpus = 0;
uv_blade_info[blade].nr_online_cpus = 0;
@@ -3595,10 +4245,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-4.1.26/arch
min_pnode = min(pnode, min_pnode);
max_pnode = max(pnode, max_pnode);
blade++;
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/asm-offsets.c linux-4.1.26/arch/x86/kernel/asm-offsets.c
---- linux-4.1.26.orig/arch/x86/kernel/asm-offsets.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/asm-offsets.c 2016-06-19 15:30:58.615294573 +0200
-@@ -32,6 +32,7 @@
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 9f6b9341950f..5701b507510b 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -32,6 +32,7 @@ void common(void) {
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_status, thread_info, status);
OFFSET(TI_addr_limit, thread_info, addr_limit);
@@ -3606,15 +4257,16 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/asm-offsets.c linux-4.1.26/arch/x86/
BLANK();
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-@@ -71,4 +72,5 @@
+@@ -71,4 +72,5 @@ void common(void) {
BLANK();
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
}
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x86/kernel/cpu/mcheck/mce.c
---- linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/cpu/mcheck/mce.c 2016-06-19 15:30:58.615294573 +0200
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 20190bdac9d5..9d46f9a133e1 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -41,6 +41,8 @@
#include <linux/debugfs.h>
#include <linux/irq_work.h>
@@ -3624,7 +4276,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
#include <asm/processor.h>
#include <asm/traps.h>
-@@ -1267,7 +1269,7 @@
+@@ -1267,7 +1269,7 @@ void mce_log_therm_throt_event(__u64 status)
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -3633,7 +4285,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
-@@ -1276,32 +1278,18 @@
+@@ -1276,32 +1278,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
@@ -3672,7 +4324,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1324,7 +1312,7 @@
+@@ -1324,7 +1312,7 @@ static void mce_timer_fn(unsigned long data)
done:
__this_cpu_write(mce_next_interval, iv);
@@ -3681,7 +4333,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
}
/*
-@@ -1332,7 +1320,7 @@
+@@ -1332,7 +1320,7 @@ done:
*/
void mce_timer_kick(unsigned long interval)
{
@@ -3690,7 +4342,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
unsigned long iv = __this_cpu_read(mce_next_interval);
__restart_timer(t, interval);
-@@ -1347,7 +1335,7 @@
+@@ -1347,7 +1335,7 @@ static void mce_timer_delete_all(void)
int cpu;
for_each_online_cpu(cpu)
@@ -3699,7 +4351,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
}
static void mce_do_trigger(struct work_struct *work)
-@@ -1357,6 +1345,56 @@
+@@ -1357,6 +1345,56 @@ static void mce_do_trigger(struct work_struct *work)
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -3756,7 +4408,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
-@@ -1364,19 +1402,8 @@
+@@ -1364,19 +1402,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
*/
int mce_notify_irq(void)
{
@@ -3777,7 +4429,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
return 1;
}
return 0;
-@@ -1649,7 +1676,7 @@
+@@ -1649,7 +1676,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
}
}
@@ -3786,7 +4438,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
{
unsigned long iv = check_interval * HZ;
-@@ -1658,16 +1685,17 @@
+@@ -1658,16 +1685,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
per_cpu(mce_next_interval, cpu) = iv;
@@ -3808,7 +4460,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
mce_start_timer(cpu, t);
}
-@@ -2345,6 +2373,8 @@
+@@ -2345,6 +2373,8 @@ static void mce_disable_cpu(void *h)
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
@@ -3817,7 +4469,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
for (i = 0; i < mca_cfg.banks; i++) {
-@@ -2371,6 +2401,7 @@
+@@ -2371,6 +2401,7 @@ static void mce_reenable_cpu(void *h)
if (b->init)
wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
}
@@ -3825,7 +4477,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2378,7 +2409,6 @@
+@@ -2378,7 +2409,6 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -3833,7 +4485,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
-@@ -2398,11 +2428,9 @@
+@@ -2398,11 +2428,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
@@ -3845,7 +4497,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
break;
}
-@@ -2441,6 +2469,10 @@
+@@ -2441,6 +2469,10 @@ static __init int mcheck_init_device(void)
goto err_out;
}
@@ -3856,10 +4508,96 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.26/arch/x
if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
err = -ENOMEM;
goto err_out;
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/dumpstack_32.c linux-4.1.26/arch/x86/kernel/dumpstack_32.c
---- linux-4.1.26.orig/arch/x86/kernel/dumpstack_32.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/dumpstack_32.c 2016-06-19 15:30:58.615294573 +0200
-@@ -42,7 +42,7 @@
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+index 358c54ad20d4..94689f19ad92 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+@@ -119,7 +119,7 @@ static struct perf_pmu_events_attr event_attr_##v = { \
+ };
+
+ struct rapl_pmu {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ int n_active; /* number of active events */
+ struct list_head active_list;
+ struct pmu *pmu; /* pointer to rapl_pmu_class */
+@@ -223,13 +223,13 @@ static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
+ if (!pmu->n_active)
+ return HRTIMER_NORESTART;
+
+- spin_lock_irqsave(&pmu->lock, flags);
++ raw_spin_lock_irqsave(&pmu->lock, flags);
+
+ list_for_each_entry(event, &pmu->active_list, active_entry) {
+ rapl_event_update(event);
+ }
+
+- spin_unlock_irqrestore(&pmu->lock, flags);
++ raw_spin_unlock_irqrestore(&pmu->lock, flags);
+
+ hrtimer_forward_now(hrtimer, pmu->timer_interval);
+
+@@ -266,9 +266,9 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
+ struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+ unsigned long flags;
+
+- spin_lock_irqsave(&pmu->lock, flags);
++ raw_spin_lock_irqsave(&pmu->lock, flags);
+ __rapl_pmu_event_start(pmu, event);
+- spin_unlock_irqrestore(&pmu->lock, flags);
++ raw_spin_unlock_irqrestore(&pmu->lock, flags);
+ }
+
+ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
+@@ -277,7 +277,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long flags;
+
+- spin_lock_irqsave(&pmu->lock, flags);
++ raw_spin_lock_irqsave(&pmu->lock, flags);
+
+ /* mark event as deactivated and stopped */
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+@@ -302,7 +302,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+
+- spin_unlock_irqrestore(&pmu->lock, flags);
++ raw_spin_unlock_irqrestore(&pmu->lock, flags);
+ }
+
+ static int rapl_pmu_event_add(struct perf_event *event, int mode)
+@@ -311,14 +311,14 @@ static int rapl_pmu_event_add(struct perf_event *event, int mode)
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long flags;
+
+- spin_lock_irqsave(&pmu->lock, flags);
++ raw_spin_lock_irqsave(&pmu->lock, flags);
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (mode & PERF_EF_START)
+ __rapl_pmu_event_start(pmu, event);
+
+- spin_unlock_irqrestore(&pmu->lock, flags);
++ raw_spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+ }
+@@ -594,7 +594,7 @@ static int rapl_cpu_prepare(int cpu)
+ pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+ if (!pmu)
+ return -1;
+- spin_lock_init(&pmu->lock);
++ raw_spin_lock_init(&pmu->lock);
+
+ INIT_LIST_HEAD(&pmu->active_list);
+
+diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
+index 464ffd69b92e..00db1aad1548 100644
+--- a/arch/x86/kernel/dumpstack_32.c
++++ b/arch/x86/kernel/dumpstack_32.c
+@@ -42,7 +42,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
@@ -3868,7 +4606,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/dumpstack_32.c linux-4.1.26/arch/x86
int graph = 0;
u32 *prev_esp;
-@@ -86,7 +86,7 @@
+@@ -86,7 +86,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
break;
touch_nmi_watchdog();
}
@@ -3877,10 +4615,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/dumpstack_32.c linux-4.1.26/arch/x86
}
EXPORT_SYMBOL(dump_trace);
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/dumpstack_64.c linux-4.1.26/arch/x86/kernel/dumpstack_64.c
---- linux-4.1.26.orig/arch/x86/kernel/dumpstack_64.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/dumpstack_64.c 2016-06-19 15:30:58.615294573 +0200
-@@ -152,7 +152,7 @@
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index 5f1c6266eb30..c331e3fef465 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
@@ -3889,7 +4628,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/dumpstack_64.c linux-4.1.26/arch/x86
struct thread_info *tinfo;
unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned long dummy;
-@@ -241,7 +241,7 @@
+@@ -241,7 +241,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
* This handles the process stack:
*/
bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
@@ -3898,7 +4637,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/dumpstack_64.c linux-4.1.26/arch/x86
}
EXPORT_SYMBOL(dump_trace);
-@@ -255,7 +255,7 @@
+@@ -255,7 +255,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
int cpu;
int i;
@@ -3907,7 +4646,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/dumpstack_64.c linux-4.1.26/arch/x86
cpu = smp_processor_id();
irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
-@@ -291,7 +291,7 @@
+@@ -291,7 +291,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
pr_cont(" %016lx", *stack++);
touch_nmi_watchdog();
}
@@ -3916,10 +4655,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/dumpstack_64.c linux-4.1.26/arch/x86
pr_cont("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/entry_32.S linux-4.1.26/arch/x86/kernel/entry_32.S
---- linux-4.1.26.orig/arch/x86/kernel/entry_32.S 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/entry_32.S 2016-06-19 15:30:58.615294573 +0200
-@@ -359,8 +359,24 @@
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 1c309763e321..8612b314af92 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -359,8 +359,24 @@ END(ret_from_exception)
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
@@ -3944,7 +4684,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/entry_32.S linux-4.1.26/arch/x86/ker
testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
call preempt_schedule_irq
-@@ -594,7 +610,7 @@
+@@ -594,7 +610,7 @@ ENDPROC(system_call)
ALIGN
RING0_PTREGS_FRAME # can't unwind into user space anyway
work_pending:
@@ -3953,7 +4693,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/entry_32.S linux-4.1.26/arch/x86/ker
jz work_notifysig
work_resched:
call schedule
-@@ -607,7 +623,7 @@
+@@ -607,7 +623,7 @@ work_resched:
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
jz restore_all
@@ -3962,10 +4702,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/entry_32.S linux-4.1.26/arch/x86/ker
jnz work_resched
work_notifysig: # deal with pending signals and
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/entry_64.S linux-4.1.26/arch/x86/kernel/entry_64.S
---- linux-4.1.26.orig/arch/x86/kernel/entry_64.S 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/entry_64.S 2016-06-19 15:30:58.615294573 +0200
-@@ -370,8 +370,8 @@
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 6c9cb6073832..db2a15c91a65 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -370,8 +370,8 @@ GLOBAL(int_with_check)
/* First do a reschedule test. */
/* edx: work, edi: workmask */
int_careful:
@@ -3976,7 +4717,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/entry_64.S linux-4.1.26/arch/x86/ker
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
-@@ -776,7 +776,23 @@
+@@ -776,7 +776,23 @@ retint_kernel:
bt $9,EFLAGS(%rsp) /* interrupts were off? */
jnc 1f
0: cmpl $0,PER_CPU_VAR(__preempt_count)
@@ -4000,7 +4741,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/entry_64.S linux-4.1.26/arch/x86/ker
call preempt_schedule_irq
jmp 0b
1:
-@@ -844,8 +860,8 @@
+@@ -844,8 +860,8 @@ native_irq_return_ldt:
/* edi: workmask, edx: work */
retint_careful:
CFI_RESTORE_STATE
@@ -4011,7 +4752,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/entry_64.S linux-4.1.26/arch/x86/ker
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
-@@ -1118,6 +1134,7 @@
+@@ -1118,6 +1134,7 @@ bad_gs:
jmp 2b
.previous
@@ -4019,7 +4760,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/entry_64.S linux-4.1.26/arch/x86/ker
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
CFI_STARTPROC
-@@ -1137,6 +1154,7 @@
+@@ -1137,6 +1154,7 @@ ENTRY(do_softirq_own_stack)
ret
CFI_ENDPROC
END(do_softirq_own_stack)
@@ -4027,10 +4768,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/entry_64.S linux-4.1.26/arch/x86/ker
#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/irq_32.c linux-4.1.26/arch/x86/kernel/irq_32.c
---- linux-4.1.26.orig/arch/x86/kernel/irq_32.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/irq_32.c 2016-06-19 15:30:58.619294727 +0200
-@@ -135,6 +135,7 @@
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index f9fd86a7fcc7..521ef3cc8066 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -135,6 +135,7 @@ void irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
@@ -4038,7 +4780,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/irq_32.c linux-4.1.26/arch/x86/kerne
void do_softirq_own_stack(void)
{
struct thread_info *curstk;
-@@ -153,6 +154,7 @@
+@@ -153,6 +154,7 @@ void do_softirq_own_stack(void)
call_on_stack(__do_softirq, isp);
}
@@ -4046,9 +4788,153 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/irq_32.c linux-4.1.26/arch/x86/kerne
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/process_32.c linux-4.1.26/arch/x86/kernel/process_32.c
---- linux-4.1.26.orig/arch/x86/kernel/process_32.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/process_32.c 2016-06-19 15:30:58.619294727 +0200
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 9435620062df..ba97b5b45879 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -36,6 +36,7 @@
+ #include <linux/kprobes.h>
+ #include <linux/debugfs.h>
+ #include <linux/nmi.h>
++#include <linux/wait-simple.h>
+ #include <asm/timer.h>
+ #include <asm/cpu.h>
+ #include <asm/traps.h>
+@@ -91,14 +92,14 @@ static void kvm_io_delay(void)
+
+ struct kvm_task_sleep_node {
+ struct hlist_node link;
+- wait_queue_head_t wq;
++ struct swait_head wq;
+ u32 token;
+ int cpu;
+ bool halted;
+ };
+
+ static struct kvm_task_sleep_head {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct hlist_head list;
+ } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
+
+@@ -122,17 +123,17 @@ void kvm_async_pf_task_wait(u32 token)
+ u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+ struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+ struct kvm_task_sleep_node n, *e;
+- DEFINE_WAIT(wait);
++ DEFINE_SWAITER(wait);
+
+ rcu_irq_enter();
+
+- spin_lock(&b->lock);
++ raw_spin_lock(&b->lock);
+ e = _find_apf_task(b, token);
+ if (e) {
+ /* dummy entry exist -> wake up was delivered ahead of PF */
+ hlist_del(&e->link);
+ kfree(e);
+- spin_unlock(&b->lock);
++ raw_spin_unlock(&b->lock);
+
+ rcu_irq_exit();
+ return;
+@@ -141,13 +142,13 @@ void kvm_async_pf_task_wait(u32 token)
+ n.token = token;
+ n.cpu = smp_processor_id();
+ n.halted = is_idle_task(current) || preempt_count() > 1;
+- init_waitqueue_head(&n.wq);
++ init_swait_head(&n.wq);
+ hlist_add_head(&n.link, &b->list);
+- spin_unlock(&b->lock);
++ raw_spin_unlock(&b->lock);
+
+ for (;;) {
+ if (!n.halted)
+- prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
++ swait_prepare(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+ if (hlist_unhashed(&n.link))
+ break;
+
+@@ -166,7 +167,7 @@ void kvm_async_pf_task_wait(u32 token)
+ }
+ }
+ if (!n.halted)
+- finish_wait(&n.wq, &wait);
++ swait_finish(&n.wq, &wait);
+
+ rcu_irq_exit();
+ return;
+@@ -178,8 +179,8 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n)
+ hlist_del_init(&n->link);
+ if (n->halted)
+ smp_send_reschedule(n->cpu);
+- else if (waitqueue_active(&n->wq))
+- wake_up(&n->wq);
++ else if (swaitqueue_active(&n->wq))
++ swait_wake(&n->wq);
+ }
+
+ static void apf_task_wake_all(void)
+@@ -189,14 +190,14 @@ static void apf_task_wake_all(void)
+ for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
+ struct hlist_node *p, *next;
+ struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
+- spin_lock(&b->lock);
++ raw_spin_lock(&b->lock);
+ hlist_for_each_safe(p, next, &b->list) {
+ struct kvm_task_sleep_node *n =
+ hlist_entry(p, typeof(*n), link);
+ if (n->cpu == smp_processor_id())
+ apf_task_wake_one(n);
+ }
+- spin_unlock(&b->lock);
++ raw_spin_unlock(&b->lock);
+ }
+ }
+
+@@ -212,7 +213,7 @@ void kvm_async_pf_task_wake(u32 token)
+ }
+
+ again:
+- spin_lock(&b->lock);
++ raw_spin_lock(&b->lock);
+ n = _find_apf_task(b, token);
+ if (!n) {
+ /*
+@@ -225,17 +226,17 @@ again:
+ * Allocation failed! Busy wait while other cpu
+ * handles async PF.
+ */
+- spin_unlock(&b->lock);
++ raw_spin_unlock(&b->lock);
+ cpu_relax();
+ goto again;
+ }
+ n->token = token;
+ n->cpu = smp_processor_id();
+- init_waitqueue_head(&n->wq);
++ init_swait_head(&n->wq);
+ hlist_add_head(&n->link, &b->list);
+ } else
+ apf_task_wake_one(n);
+- spin_unlock(&b->lock);
++ raw_spin_unlock(&b->lock);
+ return;
+ }
+ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
+@@ -486,7 +487,7 @@ void __init kvm_guest_init(void)
+ paravirt_ops_setup();
+ register_reboot_notifier(&kvm_pv_reboot_nb);
+ for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
+- spin_lock_init(&async_pf_sleepers[i].lock);
++ raw_spin_lock_init(&async_pf_sleepers[i].lock);
+ if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
+ x86_init.irqs.trap_init = kvm_apf_trap_init;
+
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 8ed2106b06da..3a70713079c5 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
@@ -35,6 +35,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -4057,7 +4943,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/process_32.c linux-4.1.26/arch/x86/k
#include <asm/pgtable.h>
#include <asm/ldt.h>
-@@ -210,6 +211,35 @@
+@@ -210,6 +211,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
}
EXPORT_SYMBOL_GPL(start_thread);
@@ -4093,7 +4979,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/process_32.c linux-4.1.26/arch/x86/k
/*
* switch_to(x,y) should switch tasks from x to y.
-@@ -292,6 +322,8 @@
+@@ -292,6 +322,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);
@@ -4102,10 +4988,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/process_32.c linux-4.1.26/arch/x86/k
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
-diff -Nur linux-4.1.26.orig/arch/x86/kernel/signal.c linux-4.1.26/arch/x86/kernel/signal.c
---- linux-4.1.26.orig/arch/x86/kernel/signal.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kernel/signal.c 2016-06-19 15:30:58.619294727 +0200
-@@ -726,6 +726,14 @@
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 5d2e2e9af1c4..1c9cc74ba99b 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -726,6 +726,14 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
{
user_exit();
@@ -4120,10 +5007,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/kernel/signal.c linux-4.1.26/arch/x86/kerne
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
-diff -Nur linux-4.1.26.orig/arch/x86/kvm/lapic.c linux-4.1.26/arch/x86/kvm/lapic.c
---- linux-4.1.26.orig/arch/x86/kvm/lapic.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kvm/lapic.c 2016-06-19 15:30:58.619294727 +0200
-@@ -1106,7 +1106,7 @@
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 7dd9a8d3911a..192de5908083 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1106,7 +1106,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
static void apic_timer_expired(struct kvm_lapic *apic)
{
struct kvm_vcpu *vcpu = apic->vcpu;
@@ -4132,7 +5020,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kvm/lapic.c linux-4.1.26/arch/x86/kvm/lapic
struct kvm_timer *ktimer = &apic->lapic_timer;
if (atomic_read(&apic->lapic_timer.pending))
-@@ -1115,8 +1115,8 @@
+@@ -1115,8 +1115,8 @@ static void apic_timer_expired(struct kvm_lapic *apic)
atomic_inc(&apic->lapic_timer.pending);
kvm_set_pending_timer(vcpu);
@@ -4143,7 +5031,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kvm/lapic.c linux-4.1.26/arch/x86/kvm/lapic
if (apic_lvtt_tscdeadline(apic))
ktimer->expired_tscdeadline = ktimer->tscdeadline;
-@@ -1169,8 +1169,36 @@
+@@ -1169,8 +1169,36 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
__delay(tsc_deadline - guest_tsc);
}
@@ -4180,7 +5068,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kvm/lapic.c linux-4.1.26/arch/x86/kvm/lapic
ktime_t now;
atomic_set(&apic->lapic_timer.pending, 0);
-@@ -1201,9 +1229,11 @@
+@@ -1201,9 +1229,11 @@ static void start_apic_timer(struct kvm_lapic *apic)
}
}
@@ -4193,7 +5081,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kvm/lapic.c linux-4.1.26/arch/x86/kvm/lapic
apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
PRIx64 ", "
-@@ -1235,8 +1265,10 @@
+@@ -1235,8 +1265,10 @@ static void start_apic_timer(struct kvm_lapic *apic)
do_div(ns, this_tsc_khz);
expire = ktime_add_ns(now, ns);
expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
@@ -4205,7 +5093,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kvm/lapic.c linux-4.1.26/arch/x86/kvm/lapic
} else
apic_timer_expired(apic);
-@@ -1709,6 +1741,7 @@
+@@ -1709,6 +1741,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
apic->lapic_timer.timer.function = apic_timer_fn;
@@ -4213,7 +5101,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/kvm/lapic.c linux-4.1.26/arch/x86/kvm/lapic
/*
* APIC is created enabled. This will prevent kvm_lapic_set_base from
-@@ -1836,7 +1869,8 @@
+@@ -1836,7 +1869,8 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
timer = &vcpu->arch.apic->lapic_timer.timer;
if (hrtimer_cancel(timer))
@@ -4223,10 +5111,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/kvm/lapic.c linux-4.1.26/arch/x86/kvm/lapic
}
/*
-diff -Nur linux-4.1.26.orig/arch/x86/kvm/x86.c linux-4.1.26/arch/x86/kvm/x86.c
---- linux-4.1.26.orig/arch/x86/kvm/x86.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/kvm/x86.c 2016-06-19 15:30:58.619294727 +0200
-@@ -5810,6 +5810,13 @@
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index bd84d2226ca1..ba639dd5f09d 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5815,6 +5815,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
@@ -4240,10 +5129,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/kvm/x86.c linux-4.1.26/arch/x86/kvm/x86.c
r = kvm_mmu_module_init();
if (r)
goto out_free_percpu;
-diff -Nur linux-4.1.26.orig/arch/x86/lib/usercopy_32.c linux-4.1.26/arch/x86/lib/usercopy_32.c
---- linux-4.1.26.orig/arch/x86/lib/usercopy_32.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/lib/usercopy_32.c 2016-06-19 15:30:58.619294727 +0200
-@@ -647,7 +647,8 @@
+diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
+index e2f5e21c03b3..91d93b95bd86 100644
+--- a/arch/x86/lib/usercopy_32.c
++++ b/arch/x86/lib/usercopy_32.c
+@@ -647,7 +647,8 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
@@ -4253,7 +5143,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/lib/usercopy_32.c linux-4.1.26/arch/x86/lib
*
* Copy data from kernel space to user space.
*
-@@ -668,7 +669,8 @@
+@@ -668,7 +669,8 @@ EXPORT_SYMBOL(_copy_to_user);
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
@@ -4263,9 +5153,10 @@ diff -Nur linux-4.1.26.orig/arch/x86/lib/usercopy_32.c linux-4.1.26/arch/x86/lib
*
* Copy data from user space to kernel space.
*
-diff -Nur linux-4.1.26.orig/arch/x86/mm/fault.c linux-4.1.26/arch/x86/mm/fault.c
---- linux-4.1.26.orig/arch/x86/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/mm/fault.c 2016-06-19 15:30:58.619294727 +0200
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 62855ac37ab7..1d3beaf7526f 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
@@ -13,6 +13,7 @@
#include <linux/hugetlb.h> /* hstate_index_to_shift */
#include <linux/prefetch.h> /* prefetchw */
@@ -4274,7 +5165,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/mm/fault.c linux-4.1.26/arch/x86/mm/fault.c
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
-@@ -1133,9 +1134,9 @@
+@@ -1133,9 +1134,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
/*
* If we're in an interrupt, have no user context or are running
@@ -4286,10 +5177,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/mm/fault.c linux-4.1.26/arch/x86/mm/fault.c
bad_area_nosemaphore(regs, error_code, address);
return;
}
-diff -Nur linux-4.1.26.orig/arch/x86/mm/highmem_32.c linux-4.1.26/arch/x86/mm/highmem_32.c
---- linux-4.1.26.orig/arch/x86/mm/highmem_32.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/mm/highmem_32.c 2016-06-19 15:30:58.619294727 +0200
-@@ -32,10 +32,11 @@
+diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
+index 4500142bc4aa..0d1cbcf47f80 100644
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
*/
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
@@ -4302,7 +5194,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/mm/highmem_32.c linux-4.1.26/arch/x86/mm/hi
pagefault_disable();
if (!PageHighMem(page))
-@@ -45,7 +46,10 @@
+@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -4314,7 +5206,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/mm/highmem_32.c linux-4.1.26/arch/x86/mm/hi
arch_flush_lazy_mmu_mode();
return (void *)vaddr;
-@@ -88,6 +92,9 @@
+@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
@@ -4324,7 +5216,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/mm/highmem_32.c linux-4.1.26/arch/x86/mm/hi
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
arch_flush_lazy_mmu_mode();
-@@ -100,6 +107,7 @@
+@@ -100,6 +107,7 @@ void __kunmap_atomic(void *kvaddr)
#endif
pagefault_enable();
@@ -4332,10 +5224,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/mm/highmem_32.c linux-4.1.26/arch/x86/mm/hi
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.26.orig/arch/x86/mm/iomap_32.c linux-4.1.26/arch/x86/mm/iomap_32.c
---- linux-4.1.26.orig/arch/x86/mm/iomap_32.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/mm/iomap_32.c 2016-06-19 15:30:58.619294727 +0200
-@@ -56,15 +56,22 @@
+diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
+index 9ca35fc60cfe..b2ffa5c7d3d3 100644
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -56,15 +56,22 @@ EXPORT_SYMBOL_GPL(iomap_free);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
@@ -4359,7 +5252,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/mm/iomap_32.c linux-4.1.26/arch/x86/mm/ioma
arch_flush_lazy_mmu_mode();
return (void *)vaddr;
-@@ -112,10 +119,14 @@
+@@ -112,10 +119,14 @@ iounmap_atomic(void __iomem *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
@@ -4374,10 +5267,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/mm/iomap_32.c linux-4.1.26/arch/x86/mm/ioma
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
-diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.26/arch/x86/platform/uv/tlb_uv.c
---- linux-4.1.26.orig/arch/x86/platform/uv/tlb_uv.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/platform/uv/tlb_uv.c 2016-06-19 15:30:58.619294727 +0200
-@@ -714,9 +714,9 @@
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index 3b6ec42718e4..7871083de089 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -714,9 +714,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
quiesce_local_uvhub(hmaster);
@@ -4389,7 +5283,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.26/arch/x86/
end_uvhub_quiesce(hmaster);
-@@ -736,9 +736,9 @@
+@@ -736,9 +736,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
quiesce_local_uvhub(hmaster);
@@ -4401,7 +5295,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.26/arch/x86/
end_uvhub_quiesce(hmaster);
-@@ -759,7 +759,7 @@
+@@ -759,7 +759,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
cycles_t tm1;
hmaster = bcp->uvhub_master;
@@ -4410,7 +5304,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.26/arch/x86/
if (!bcp->baudisabled) {
stat->s_bau_disabled++;
tm1 = get_cycles();
-@@ -772,7 +772,7 @@
+@@ -772,7 +772,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
}
}
}
@@ -4419,7 +5313,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.26/arch/x86/
}
static void count_max_concurr(int stat, struct bau_control *bcp,
-@@ -835,7 +835,7 @@
+@@ -835,7 +835,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
*/
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
@@ -4428,7 +5322,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.26/arch/x86/
atomic_t *v;
v = &hmaster->active_descriptor_count;
-@@ -968,7 +968,7 @@
+@@ -968,7 +968,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
struct bau_control *hmaster;
hmaster = bcp->uvhub_master;
@@ -4437,7 +5331,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.26/arch/x86/
if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
stat->s_bau_reenabled++;
for_each_present_cpu(tcpu) {
-@@ -980,10 +980,10 @@
+@@ -980,10 +980,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
tbcp->period_giveups = 0;
}
}
@@ -4450,7 +5344,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.26/arch/x86/
return -1;
}
-@@ -1901,9 +1901,9 @@
+@@ -1901,9 +1901,9 @@ static void __init init_per_cpu_tunables(void)
bcp->cong_reps = congested_reps;
bcp->disabled_period = sec_2_cycles(disabled_period);
bcp->giveup_limit = giveup_limit;
@@ -4463,10 +5357,11 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.26/arch/x86/
}
}
-diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/uv_time.c linux-4.1.26/arch/x86/platform/uv/uv_time.c
---- linux-4.1.26.orig/arch/x86/platform/uv/uv_time.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/x86/platform/uv/uv_time.c 2016-06-19 15:30:58.623294881 +0200
-@@ -58,7 +58,7 @@
+diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
+index a244237f3cfa..a718fe0d2e73 100644
+--- a/arch/x86/platform/uv/uv_time.c
++++ b/arch/x86/platform/uv/uv_time.c
+@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
/* There is one of these allocated per node */
struct uv_rtc_timer_head {
@@ -4475,7 +5370,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/uv_time.c linux-4.1.26/arch/x86
/* next cpu waiting for timer, local node relative: */
int next_cpu;
/* number of cpus on this node: */
-@@ -178,7 +178,7 @@
+@@ -178,7 +178,7 @@ static __init int uv_rtc_allocate_timers(void)
uv_rtc_deallocate_timers();
return -ENOMEM;
}
@@ -4484,7 +5379,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/uv_time.c linux-4.1.26/arch/x86
head->ncpus = uv_blade_nr_possible_cpus(bid);
head->next_cpu = -1;
blade_info[bid] = head;
-@@ -232,7 +232,7 @@
+@@ -232,7 +232,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
unsigned long flags;
int next_cpu;
@@ -4493,7 +5388,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/uv_time.c linux-4.1.26/arch/x86
next_cpu = head->next_cpu;
*t = expires;
-@@ -244,12 +244,12 @@
+@@ -244,12 +244,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
if (uv_setup_intr(cpu, expires)) {
*t = ULLONG_MAX;
uv_rtc_find_next_timer(head, pnode);
@@ -4508,7 +5403,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/uv_time.c linux-4.1.26/arch/x86
return 0;
}
-@@ -268,7 +268,7 @@
+@@ -268,7 +268,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
unsigned long flags;
int rc = 0;
@@ -4517,7 +5412,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/uv_time.c linux-4.1.26/arch/x86
if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
rc = 1;
-@@ -280,7 +280,7 @@
+@@ -280,7 +280,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
uv_rtc_find_next_timer(head, pnode);
}
@@ -4526,7 +5421,7 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/uv_time.c linux-4.1.26/arch/x86
return rc;
}
-@@ -300,13 +300,18 @@
+@@ -300,13 +300,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
static cycle_t uv_read_rtc(struct clocksource *cs)
{
unsigned long offset;
@@ -4546,9 +5441,10 @@ diff -Nur linux-4.1.26.orig/arch/x86/platform/uv/uv_time.c linux-4.1.26/arch/x86
}
/*
-diff -Nur linux-4.1.26.orig/arch/xtensa/mm/fault.c linux-4.1.26/arch/xtensa/mm/fault.c
---- linux-4.1.26.orig/arch/xtensa/mm/fault.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/xtensa/mm/fault.c 2016-06-19 15:30:58.623294881 +0200
+diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
+index 9e3571a6535c..83a44a33cfa1 100644
+--- a/arch/xtensa/mm/fault.c
++++ b/arch/xtensa/mm/fault.c
@@ -15,10 +15,10 @@
#include <linux/mm.h>
#include <linux/module.h>
@@ -4561,7 +5457,7 @@ diff -Nur linux-4.1.26.orig/arch/xtensa/mm/fault.c linux-4.1.26/arch/xtensa/mm/f
#include <asm/pgalloc.h>
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
-@@ -57,7 +57,7 @@
+@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
/* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@@ -4570,10 +5466,11 @@ diff -Nur linux-4.1.26.orig/arch/xtensa/mm/fault.c linux-4.1.26/arch/xtensa/mm/f
bad_page_fault(regs, address, SIGSEGV);
return;
}
-diff -Nur linux-4.1.26.orig/arch/xtensa/mm/highmem.c linux-4.1.26/arch/xtensa/mm/highmem.c
---- linux-4.1.26.orig/arch/xtensa/mm/highmem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/arch/xtensa/mm/highmem.c 2016-06-19 15:30:58.623294881 +0200
-@@ -42,6 +42,7 @@
+diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
+index 8cfb71ec0937..184ceadccc1a 100644
+--- a/arch/xtensa/mm/highmem.c
++++ b/arch/xtensa/mm/highmem.c
+@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
enum fixed_addresses idx;
unsigned long vaddr;
@@ -4581,7 +5478,7 @@ diff -Nur linux-4.1.26.orig/arch/xtensa/mm/highmem.c linux-4.1.26/arch/xtensa/mm
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
-@@ -79,6 +80,7 @@
+@@ -79,6 +80,7 @@ void __kunmap_atomic(void *kvaddr)
}
pagefault_enable();
@@ -4589,10 +5486,11 @@ diff -Nur linux-4.1.26.orig/arch/xtensa/mm/highmem.c linux-4.1.26/arch/xtensa/mm
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.26.orig/block/blk-core.c linux-4.1.26/block/blk-core.c
---- linux-4.1.26.orig/block/blk-core.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/block/blk-core.c 2016-06-19 15:30:58.623294881 +0200
-@@ -100,6 +100,9 @@
+diff --git a/block/blk-core.c b/block/blk-core.c
+index bbbf36e6066b..24935f6ca5bb 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -100,6 +100,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->timeout_list);
@@ -4602,7 +5500,7 @@ diff -Nur linux-4.1.26.orig/block/blk-core.c linux-4.1.26/block/blk-core.c
rq->cpu = -1;
rq->q = q;
rq->__sector = (sector_t) -1;
-@@ -194,7 +197,7 @@
+@@ -194,7 +197,7 @@ EXPORT_SYMBOL(blk_delay_queue);
**/
void blk_start_queue(struct request_queue *q)
{
@@ -4611,7 +5509,7 @@ diff -Nur linux-4.1.26.orig/block/blk-core.c linux-4.1.26/block/blk-core.c
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
-@@ -661,7 +664,7 @@
+@@ -663,7 +666,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
@@ -4620,7 +5518,7 @@ diff -Nur linux-4.1.26.orig/block/blk-core.c linux-4.1.26/block/blk-core.c
if (blkcg_init_queue(q))
goto fail_bdi;
-@@ -3077,7 +3080,7 @@
+@@ -3079,7 +3082,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
blk_run_queue_async(q);
else
__blk_run_queue(q);
@@ -4629,7 +5527,7 @@ diff -Nur linux-4.1.26.orig/block/blk-core.c linux-4.1.26/block/blk-core.c
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3125,7 +3128,6 @@
+@@ -3127,7 +3130,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -4637,7 +5535,7 @@ diff -Nur linux-4.1.26.orig/block/blk-core.c linux-4.1.26/block/blk-core.c
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
-@@ -3145,11 +3147,6 @@
+@@ -3147,11 +3149,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
q = NULL;
depth = 0;
@@ -4649,7 +5547,7 @@ diff -Nur linux-4.1.26.orig/block/blk-core.c linux-4.1.26/block/blk-core.c
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
-@@ -3162,7 +3159,7 @@
+@@ -3164,7 +3161,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@@ -4658,7 +5556,7 @@ diff -Nur linux-4.1.26.orig/block/blk-core.c linux-4.1.26/block/blk-core.c
}
/*
-@@ -3189,8 +3186,6 @@
+@@ -3191,8 +3188,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
*/
if (q)
queue_unplugged(q, depth, from_schedule);
@@ -4667,9 +5565,10 @@ diff -Nur linux-4.1.26.orig/block/blk-core.c linux-4.1.26/block/blk-core.c
}
void blk_finish_plug(struct blk_plug *plug)
-diff -Nur linux-4.1.26.orig/block/blk-ioc.c linux-4.1.26/block/blk-ioc.c
---- linux-4.1.26.orig/block/blk-ioc.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/block/blk-ioc.c 2016-06-19 15:30:58.623294881 +0200
+diff --git a/block/blk-ioc.c b/block/blk-ioc.c
+index 1a27f45ec776..28f467e636cc 100644
+--- a/block/blk-ioc.c
++++ b/block/blk-ioc.c
@@ -7,6 +7,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -4678,7 +5577,7 @@ diff -Nur linux-4.1.26.orig/block/blk-ioc.c linux-4.1.26/block/blk-ioc.c
#include "blk.h"
-@@ -109,7 +110,7 @@
+@@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_struct *work)
spin_unlock(q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
@@ -4687,7 +5586,7 @@ diff -Nur linux-4.1.26.orig/block/blk-ioc.c linux-4.1.26/block/blk-ioc.c
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
}
}
-@@ -187,7 +188,7 @@
+@@ -187,7 +188,7 @@ retry:
spin_unlock(icq->q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
@@ -4696,10 +5595,11 @@ diff -Nur linux-4.1.26.orig/block/blk-ioc.c linux-4.1.26/block/blk-ioc.c
goto retry;
}
}
-diff -Nur linux-4.1.26.orig/block/blk-iopoll.c linux-4.1.26/block/blk-iopoll.c
---- linux-4.1.26.orig/block/blk-iopoll.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/block/blk-iopoll.c 2016-06-19 15:30:58.623294881 +0200
-@@ -35,6 +35,7 @@
+diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
+index 0736729d6494..3e21e31d0d7e 100644
+--- a/block/blk-iopoll.c
++++ b/block/blk-iopoll.c
+@@ -35,6 +35,7 @@ void blk_iopoll_sched(struct blk_iopoll *iop)
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_restore(flags);
@@ -4707,7 +5607,7 @@ diff -Nur linux-4.1.26.orig/block/blk-iopoll.c linux-4.1.26/block/blk-iopoll.c
}
EXPORT_SYMBOL(blk_iopoll_sched);
-@@ -132,6 +133,7 @@
+@@ -132,6 +133,7 @@ static void blk_iopoll_softirq(struct softirq_action *h)
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
@@ -4715,7 +5615,7 @@ diff -Nur linux-4.1.26.orig/block/blk-iopoll.c linux-4.1.26/block/blk-iopoll.c
}
/**
-@@ -201,6 +203,7 @@
+@@ -201,6 +203,7 @@ static int blk_iopoll_cpu_notify(struct notifier_block *self,
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
local_irq_enable();
@@ -4723,10 +5623,66 @@ diff -Nur linux-4.1.26.orig/block/blk-iopoll.c linux-4.1.26/block/blk-iopoll.c
}
return NOTIFY_OK;
-diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
---- linux-4.1.26.orig/block/blk-mq.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/block/blk-mq.c 2016-06-19 15:30:58.623294881 +0200
-@@ -88,7 +88,7 @@
+diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
+index bb3ed488f7b5..628c6c13c482 100644
+--- a/block/blk-mq-cpu.c
++++ b/block/blk-mq-cpu.c
+@@ -16,7 +16,7 @@
+ #include "blk-mq.h"
+
+ static LIST_HEAD(blk_mq_cpu_notify_list);
+-static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
++static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+
+ static int blk_mq_main_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+@@ -25,7 +25,10 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
+ struct blk_mq_cpu_notifier *notify;
+ int ret = NOTIFY_OK;
+
+- raw_spin_lock(&blk_mq_cpu_notify_lock);
++ if (action != CPU_POST_DEAD)
++ return NOTIFY_OK;
++
++ spin_lock(&blk_mq_cpu_notify_lock);
+
+ list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
+ ret = notify->notify(notify->data, action, cpu);
+@@ -33,7 +36,7 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
+ break;
+ }
+
+- raw_spin_unlock(&blk_mq_cpu_notify_lock);
++ spin_unlock(&blk_mq_cpu_notify_lock);
+ return ret;
+ }
+
+@@ -41,16 +44,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+ {
+ BUG_ON(!notifier->notify);
+
+- raw_spin_lock(&blk_mq_cpu_notify_lock);
++ spin_lock(&blk_mq_cpu_notify_lock);
+ list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
+- raw_spin_unlock(&blk_mq_cpu_notify_lock);
++ spin_unlock(&blk_mq_cpu_notify_lock);
+ }
+
+ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+ {
+- raw_spin_lock(&blk_mq_cpu_notify_lock);
++ spin_lock(&blk_mq_cpu_notify_lock);
+ list_del(&notifier->list);
+- raw_spin_unlock(&blk_mq_cpu_notify_lock);
++ spin_unlock(&blk_mq_cpu_notify_lock);
+ }
+
+ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 2dc1fd6c5bdb..c473bd192a41 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -88,7 +88,7 @@ static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
if (!(gfp & __GFP_WAIT))
return -EBUSY;
@@ -4735,7 +5691,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
!q->mq_freeze_depth || blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
-@@ -107,7 +107,7 @@
+@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
struct request_queue *q =
container_of(ref, struct request_queue, mq_usage_counter);
@@ -4744,7 +5700,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
}
void blk_mq_freeze_queue_start(struct request_queue *q)
-@@ -127,7 +127,7 @@
+@@ -127,7 +127,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
@@ -4753,7 +5709,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
}
/*
-@@ -151,7 +151,7 @@
+@@ -151,7 +151,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
if (wake) {
percpu_ref_reinit(&q->mq_usage_counter);
@@ -4762,7 +5718,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
-@@ -170,7 +170,7 @@
+@@ -170,7 +170,7 @@ void blk_mq_wake_waiters(struct request_queue *q)
* dying, we need to ensure that processes currently waiting on
* the queue are notified as well.
*/
@@ -4771,7 +5727,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
}
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
-@@ -217,6 +217,9 @@
+@@ -217,6 +217,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
rq->resid_len = 0;
rq->sense = NULL;
@@ -4781,7 +5737,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
-@@ -346,6 +349,17 @@
+@@ -346,6 +349,17 @@ void blk_mq_end_request(struct request *rq, int error)
}
EXPORT_SYMBOL(blk_mq_end_request);
@@ -4799,7 +5755,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
-@@ -353,6 +367,8 @@
+@@ -353,6 +367,8 @@ static void __blk_mq_complete_request_remote(void *data)
rq->q->softirq_done_fn(rq);
}
@@ -4808,7 +5764,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
static void blk_mq_ipi_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
-@@ -364,19 +380,23 @@
+@@ -364,19 +380,23 @@ static void blk_mq_ipi_complete_request(struct request *rq)
return;
}
@@ -4834,7 +5790,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
}
void __blk_mq_complete_request(struct request *rq)
-@@ -905,14 +925,14 @@
+@@ -905,14 +925,14 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
return;
if (!async) {
@@ -4852,7 +5808,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
}
kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-@@ -1589,7 +1609,7 @@
+@@ -1589,7 +1609,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
{
struct blk_mq_hw_ctx *hctx = data;
@@ -4861,64 +5817,11 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.c linux-4.1.26/block/blk-mq.c
return blk_mq_hctx_cpu_offline(hctx, cpu);
/*
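
Note on the hunks above (not part of the patch): they swap get_cpu()/put_cpu() for
get_cpu_light()/put_cpu_light() in the completion and queue-run paths. These light
variants exist only with this series applied; on RT, get_cpu_light() disables only
migration, so the section stays preemptible and may take sleeping spinlock_t locks.
A minimal sketch of the difference, assuming the RT-series definitions:

/* Sketch only, not part of the patch. */
static void percpu_section_sketch(void)
{
	int cpu;

	cpu = get_cpu();	/* preempt_disable() + smp_processor_id() */
	/* no sleeping locks allowed here on RT */
	put_cpu();

	cpu = get_cpu_light();	/* RT: migrate_disable() only */
	/* still pinned to 'cpu', but spinlock_t (an rtmutex on RT) is fine */
	put_cpu_light();
	(void)cpu;
}
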
-diff -Nur linux-4.1.26.orig/block/blk-mq-cpu.c linux-4.1.26/block/blk-mq-cpu.c
---- linux-4.1.26.orig/block/blk-mq-cpu.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/block/blk-mq-cpu.c 2016-06-19 15:30:58.623294881 +0200
-@@ -16,7 +16,7 @@
- #include "blk-mq.h"
-
- static LIST_HEAD(blk_mq_cpu_notify_list);
--static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
-+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
-
- static int blk_mq_main_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-@@ -25,7 +25,10 @@
- struct blk_mq_cpu_notifier *notify;
- int ret = NOTIFY_OK;
-
-- raw_spin_lock(&blk_mq_cpu_notify_lock);
-+ if (action != CPU_POST_DEAD)
-+ return NOTIFY_OK;
-+
-+ spin_lock(&blk_mq_cpu_notify_lock);
-
- list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
- ret = notify->notify(notify->data, action, cpu);
-@@ -33,7 +36,7 @@
- break;
- }
-
-- raw_spin_unlock(&blk_mq_cpu_notify_lock);
-+ spin_unlock(&blk_mq_cpu_notify_lock);
- return ret;
- }
-
-@@ -41,16 +44,16 @@
- {
- BUG_ON(!notifier->notify);
-
-- raw_spin_lock(&blk_mq_cpu_notify_lock);
-+ spin_lock(&blk_mq_cpu_notify_lock);
- list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
-- raw_spin_unlock(&blk_mq_cpu_notify_lock);
-+ spin_unlock(&blk_mq_cpu_notify_lock);
- }
-
- void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
- {
-- raw_spin_lock(&blk_mq_cpu_notify_lock);
-+ spin_lock(&blk_mq_cpu_notify_lock);
- list_del(&notifier->list);
-- raw_spin_unlock(&blk_mq_cpu_notify_lock);
-+ spin_unlock(&blk_mq_cpu_notify_lock);
- }
-
- void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
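
Background for the blk-mq-cpu.c conversion above (editor's sketch, not patch content):
the distinction between the two lock types only matters on PREEMPT_RT, where
spinlock_t is backed by an rtmutex and may sleep, while raw_spinlock_t keeps the
non-sleeping mainline behaviour. The notifier callbacks run under
blk_mq_cpu_notify_lock may themselves block, so the plain type is the safe one here:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(hard_lock);	/* always spins, preemption off */
static DEFINE_SPINLOCK(soft_lock);	/* on RT: a sleeping rtmutex */

static void lock_types_sketch(void)
{
	raw_spin_lock(&hard_lock);
	/* must not sleep; must not take a spinlock_t on RT */
	raw_spin_unlock(&hard_lock);

	spin_lock(&soft_lock);
	/* preemptible on RT; callees that block are allowed */
	spin_unlock(&soft_lock);
}
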
-diff -Nur linux-4.1.26.orig/block/blk-mq.h linux-4.1.26/block/blk-mq.h
---- linux-4.1.26.orig/block/blk-mq.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/block/blk-mq.h 2016-06-19 15:30:58.623294881 +0200
-@@ -76,7 +76,10 @@
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 6a48c4c0d8a2..4b7cbf0e6e82 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -76,7 +76,10 @@ struct blk_align_bitmap {
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
@@ -4930,7 +5833,7 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.h linux-4.1.26/block/blk-mq.h
}
/*
-@@ -87,12 +90,12 @@
+@@ -87,12 +90,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
@@ -4945,10 +5848,11 @@ diff -Nur linux-4.1.26.orig/block/blk-mq.h linux-4.1.26/block/blk-mq.h
}
struct blk_mq_alloc_data {
-diff -Nur linux-4.1.26.orig/block/blk-softirq.c linux-4.1.26/block/blk-softirq.c
---- linux-4.1.26.orig/block/blk-softirq.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/block/blk-softirq.c 2016-06-19 15:30:58.623294881 +0200
-@@ -51,6 +51,7 @@
+diff --git a/block/blk-softirq.c b/block/blk-softirq.c
+index 53b1737e978d..81c3c0a62edf 100644
+--- a/block/blk-softirq.c
++++ b/block/blk-softirq.c
+@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
@@ -4956,7 +5860,7 @@ diff -Nur linux-4.1.26.orig/block/blk-softirq.c linux-4.1.26/block/blk-softirq.c
}
/*
-@@ -93,6 +94,7 @@
+@@ -93,6 +94,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
this_cpu_ptr(&blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
@@ -4964,7 +5868,7 @@ diff -Nur linux-4.1.26.orig/block/blk-softirq.c linux-4.1.26/block/blk-softirq.c
}
return NOTIFY_OK;
-@@ -150,6 +152,7 @@
+@@ -150,6 +152,7 @@ do_local:
goto do_local;
local_irq_restore(flags);
@@ -4972,10 +5876,11 @@ diff -Nur linux-4.1.26.orig/block/blk-softirq.c linux-4.1.26/block/blk-softirq.c
}
/**
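
The three one-line additions to blk-softirq.c above fall outside the visible
context, but each inserts a call to preempt_check_resched_rt(), an RT-series helper
that compiles away on mainline: raising a softirq with interrupts off may make
ksoftirqd runnable, and on RT that wakeup cannot be acted on until the caller
leaves the irqs-off region. The idiom, sketched with the series applied:

/* Sketch of the pattern; preempt_check_resched_rt() is RT-only. */
static void raise_block_softirq_sketch(void)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(BLOCK_SOFTIRQ);	/* may wake ksoftirqd on RT */
	local_irq_restore(flags);
	preempt_check_resched_rt();	/* act on the deferred wakeup */
}
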
-diff -Nur linux-4.1.26.orig/block/bounce.c linux-4.1.26/block/bounce.c
---- linux-4.1.26.orig/block/bounce.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/block/bounce.c 2016-06-19 15:30:58.623294881 +0200
-@@ -54,11 +54,11 @@
+diff --git a/block/bounce.c b/block/bounce.c
+index ed9dd8067120..39d123e0a989 100644
+--- a/block/bounce.c
++++ b/block/bounce.c
+@@ -54,11 +54,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
unsigned long flags;
unsigned char *vto;
@@ -4989,10 +5894,11 @@ diff -Nur linux-4.1.26.orig/block/bounce.c linux-4.1.26/block/bounce.c
}
#else /* CONFIG_HIGHMEM */
-diff -Nur linux-4.1.26.orig/crypto/algapi.c linux-4.1.26/crypto/algapi.c
---- linux-4.1.26.orig/crypto/algapi.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/crypto/algapi.c 2016-06-19 15:30:58.623294881 +0200
-@@ -695,13 +695,13 @@
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index dda720c6ab08..1629b110dabd 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -695,13 +695,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
int crypto_register_notifier(struct notifier_block *nb)
{
@@ -5008,10 +5914,11 @@ diff -Nur linux-4.1.26.orig/crypto/algapi.c linux-4.1.26/crypto/algapi.c
}
EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
-diff -Nur linux-4.1.26.orig/crypto/api.c linux-4.1.26/crypto/api.c
---- linux-4.1.26.orig/crypto/api.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/crypto/api.c 2016-06-19 15:30:58.623294881 +0200
-@@ -31,7 +31,7 @@
+diff --git a/crypto/api.c b/crypto/api.c
+index bbc147cb5dec..bc1a848f02ec 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);
@@ -5020,7 +5927,7 @@ diff -Nur linux-4.1.26.orig/crypto/api.c linux-4.1.26/crypto/api.c
EXPORT_SYMBOL_GPL(crypto_chain);
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
-@@ -236,10 +236,10 @@
+@@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long val, void *v)
{
int ok;
@@ -5033,10 +5940,11 @@ diff -Nur linux-4.1.26.orig/crypto/api.c linux-4.1.26/crypto/api.c
}
return ok;
-diff -Nur linux-4.1.26.orig/crypto/internal.h linux-4.1.26/crypto/internal.h
---- linux-4.1.26.orig/crypto/internal.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/crypto/internal.h 2016-06-19 15:30:58.623294881 +0200
-@@ -48,7 +48,7 @@
+diff --git a/crypto/internal.h b/crypto/internal.h
+index bd39bfc92eab..a5db167cba84 100644
+--- a/crypto/internal.h
++++ b/crypto/internal.h
+@@ -48,7 +48,7 @@ struct crypto_larval {
extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
@@ -5045,7 +5953,7 @@ diff -Nur linux-4.1.26.orig/crypto/internal.h linux-4.1.26/crypto/internal.h
#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
-@@ -142,7 +142,7 @@
+@@ -142,7 +142,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg)
static inline void crypto_notify(unsigned long val, void *v)
{
@@ -5054,291 +5962,11 @@ diff -Nur linux-4.1.26.orig/crypto/internal.h linux-4.1.26/crypto/internal.h
}
#endif /* _CRYPTO_INTERNAL_H */
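
The crypto hunks in this group belong to the series' notifier conversion; the
changed lines themselves sit outside the visible context, but crypto_chain moves
from a blocking notifier (whose read side takes an rw-semaphore and may sleep) to
an SRCU notifier (whose read side never blocks). A sketch of the SRCU variant,
using hypothetical names:

#include <linux/notifier.h>

static SRCU_NOTIFIER_HEAD(example_chain);

static int example_notify(unsigned long val, void *v)
{
	/* read side: SRCU read lock, never sleeps */
	return srcu_notifier_call_chain(&example_chain, val, v);
}

static int example_register(struct notifier_block *nb)
{
	/* write side still serializes registrations internally */
	return srcu_notifier_chain_register(&example_chain, nb);
}
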
-diff -Nur linux-4.1.26.orig/Documentation/hwlat_detector.txt linux-4.1.26/Documentation/hwlat_detector.txt
---- linux-4.1.26.orig/Documentation/hwlat_detector.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/Documentation/hwlat_detector.txt 2016-06-19 15:30:54.915151887 +0200
-@@ -0,0 +1,64 @@
-+Introduction:
-+-------------
-+
-+The module hwlat_detector is a special purpose kernel module that is used to
-+detect large system latencies induced by the behavior of certain underlying
-+hardware or firmware, independent of Linux itself. The code was developed
-+originally to detect SMIs (System Management Interrupts) on x86 systems,
-+however there is nothing x86 specific about this patchset. It was
-+originally written for use by the "RT" patch since the Real Time
-+kernel is highly latency sensitive.
-+
-+SMIs are usually not serviced by the Linux kernel, which typically does not
-+even know that they are occuring. SMIs are instead are set up by BIOS code
-+and are serviced by BIOS code, usually for "critical" events such as
-+management of thermal sensors and fans. Sometimes though, SMIs are used for
-+other tasks and those tasks can spend an inordinate amount of time in the
-+handler (sometimes measured in milliseconds). Obviously this is a problem if
-+you are trying to keep event service latencies down in the microsecond range.
-+
-+The hardware latency detector works by hogging all of the CPUs for configurable
-+amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
-+for some period, then looking for gaps in the TSC data. Any gap indicates a
-+time when the polling was interrupted, and since the machine is stopped and
-+interrupts are turned off, the only thing that could do that would be an SMI.
-+
-+Note that the SMI detector should *NEVER* be used in a production environment.
-+It is intended to be run manually to determine if the hardware platform has a
-+problem with long system firmware service routines.
-+
-+Usage:
-+------
-+
-+Loading the module hwlat_detector with the parameter "enabled=1" (or by
-+toggling the "enable" entry in the "hwlat_detector" debugfs directory) is the
-+only step required to start the hwlat_detector. It is possible to redefine the
-+threshold in microseconds (us) above which latency spikes will be taken
-+into account (parameter "threshold=").
-+
-+Example:
-+
-+ # modprobe hwlat_detector enabled=1 threshold=100
-+
-+After the module is loaded, it creates a directory named "hwlat_detector" under
-+the debugfs mountpoint; this text assumes "/debug/hwlat_detector". It is
-+necessary to have debugfs mounted, which might be on /sys/debug on your system.
-+
-+The /debug/hwlat_detector interface contains the following files:
-+
-+count - number of latency spikes observed since last reset
-+enable - a global enable/disable toggle (0/1), resets count
-+max - maximum hardware latency actually observed (usecs)
-+sample - a pipe from which to read current raw sample data
-+ in the format <timestamp> <latency observed usecs>
-+ (can be opened O_NONBLOCK for a single sample)
-+threshold - minimum latency value to be considered (usecs)
-+width - time period to sample with CPUs held (usecs)
-+ must be less than the total window size (enforced)
-+window - total period of sampling, width being inside (usecs)
-+
-+By default we will set width to 500,000 and window to 1,000,000, meaning that
-+we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
-+observe any latencies that exceed the threshold (initially 100 usecs),
-+then we write to a global sample ring buffer of 8K samples, which is
-+consumed by reading from the "sample" (pipe) debugfs file interface.
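
The detection loop described in the removed text boils down to reading the
timestamp counter in a tight loop and treating any large jump between consecutive
reads as time stolen by firmware. A stripped-down sketch of that idea follows
(hypothetical names; the real module runs under stop_machine() with interrupts off
and logs samples to a ring buffer):

#include <linux/ktime.h>

/* Illustrative only; not the module's code. */
static u64 longest_gap_ns(u64 width_ns, u64 threshold_ns)
{
	u64 start, last, now, max_gap = 0;

	start = last = ktime_get_ns();	/* stand-in for the raw TSC read */
	do {
		now = ktime_get_ns();
		if (now - last > max_gap)
			max_gap = now - last;	/* a gap == stolen time */
		last = now;
	} while (now - start < width_ns);

	return max_gap >= threshold_ns ? max_gap : 0;
}
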
-diff -Nur linux-4.1.26.orig/Documentation/sysrq.txt linux-4.1.26/Documentation/sysrq.txt
---- linux-4.1.26.orig/Documentation/sysrq.txt 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/Documentation/sysrq.txt 2016-06-19 15:30:54.915151887 +0200
-@@ -59,10 +59,17 @@
- On other - If you know of the key combos for other architectures, please
- let me know so I can add them to this section.
-
--On all - write a character to /proc/sysrq-trigger. e.g.:
--
-+On all - write a character to /proc/sysrq-trigger, e.g.:
- echo t > /proc/sysrq-trigger
-
-+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
-+ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
-+ Send an ICMP echo request with this pattern plus the particular
-+ SysRq command key. Example:
-+ # ping -c1 -s57 -p0102030468
-+ will trigger the SysRq-H (help) command.
-+
-+
- * What are the 'command' keys?
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 'b' - Will immediately reboot the system without syncing or unmounting
-diff -Nur linux-4.1.26.orig/Documentation/trace/histograms.txt linux-4.1.26/Documentation/trace/histograms.txt
---- linux-4.1.26.orig/Documentation/trace/histograms.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/Documentation/trace/histograms.txt 2016-06-19 15:30:54.915151887 +0200
-@@ -0,0 +1,186 @@
-+ Using the Linux Kernel Latency Histograms
-+
-+
-+This document gives a short explanation how to enable, configure and use
-+latency histograms. Latency histograms are primarily relevant in the
-+context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
-+and are used in the quality management of the Linux real-time
-+capabilities.
-+
-+
-+* Purpose of latency histograms
-+
-+A latency histogram continuously accumulates the frequencies of latency
-+data. There are two types of histograms:
-+- potential sources of latencies
-+- effective latencies
-+
-+
-+* Potential sources of latencies
-+
-+Potential sources of latencies are code segments where interrupts,
-+preemption or both are disabled (aka critical sections). To create
-+histograms of potential sources of latency, the kernel stores the time
-+stamp at the start of a critical section, determines the time elapsed
-+when the end of the section is reached, and increments the frequency
-+counter of that latency value - irrespective of whether any concurrently
-+running process is affected by latency or not.
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+ CONFIG_INTERRUPT_OFF_LATENCY
-+ CONFIG_PREEMPT_OFF_LATENCY
-+
-+
-+* Effective latencies
-+
-+Effective latencies are those actually occurring during wakeup of a process. To
-+determine effective latencies, the kernel stores the time stamp when a
-+process is scheduled to be woken up, and determines the duration of the
-+wakeup time shortly before control is passed over to this process. Note
-+that the apparent latency in user space may be somewhat longer, since the
-+process may be interrupted after control is passed over to it but before
-+the execution in user space takes place. Simply measuring the interval
-+between enqueuing and wakeup may also not be appropriate in cases when a
-+process is scheduled as a result of a timer expiration. The timer may have
-+missed its deadline, e.g. due to disabled interrupts, but this latency
-+would not be registered. Therefore, the offsets of missed timers are
-+recorded in a separate histogram. If both wakeup latency and missed timer
-+offsets are configured and enabled, a third histogram may be enabled that
-+records the overall latency as a sum of the timer latency, if any, and the
-+wakeup latency. This histogram is called "timerandwakeup".
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+ CONFIG_WAKEUP_LATENCY
-+ CONFIG_MISSED_TIMER_OFFSETS
-+
-+
-+* Usage
-+
-+The interface to the administration of the latency histograms is located
-+in the debugfs file system. To mount it, either enter
-+
-+mount -t sysfs nodev /sys
-+mount -t debugfs nodev /sys/kernel/debug
-+
-+from shell command line level, or add
-+
-+nodev /sys sysfs defaults 0 0
-+nodev /sys/kernel/debug debugfs defaults 0 0
-+
-+to the file /etc/fstab. All latency histogram related files are then
-+available in the directory /sys/kernel/debug/tracing/latency_hist. A
-+particular histogram type is enabled by writing non-zero to the related
-+variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
-+Select "preemptirqsoff" for the histograms of potential sources of
-+latencies and "wakeup" for histograms of effective latencies etc. The
-+histogram data - one per CPU - are available in the files
-+
-+/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
-+/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
-+/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
-+
-+The histograms are reset by writing non-zero to the file "reset" in a
-+particular latency directory. To reset all latency data, use
-+
-+#!/bin/sh
-+
-+TRACINGDIR=/sys/kernel/debug/tracing
-+HISTDIR=$TRACINGDIR/latency_hist
-+
-+if test -d $HISTDIR
-+then
-+ cd $HISTDIR
-+ for i in `find . | grep /reset$`
-+ do
-+ echo 1 >$i
-+ done
-+fi
-+
-+
-+* Data format
-+
-+Latency data are stored with a resolution of one microsecond. The
-+maximum latency is 10,240 microseconds. The data are only valid if the
-+overflow register is empty. Every output line contains the latency in
-+microseconds in the first row and the number of samples in the second
-+row. To display only lines with a positive latency count, use, for
-+example,
-+
-+grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
-+
-+#Minimum latency: 0 microseconds.
-+#Average latency: 0 microseconds.
-+#Maximum latency: 25 microseconds.
-+#Total samples: 3104770694
-+#There are 0 samples greater or equal than 10240 microseconds
-+#usecs samples
-+ 0 2984486876
-+ 1 49843506
-+ 2 58219047
-+ 3 5348126
-+ 4 2187960
-+ 5 3388262
-+ 6 959289
-+ 7 208294
-+ 8 40420
-+ 9 4485
-+ 10 14918
-+ 11 18340
-+ 12 25052
-+ 13 19455
-+ 14 5602
-+ 15 969
-+ 16 47
-+ 17 18
-+ 18 14
-+ 19 1
-+ 20 3
-+ 21 2
-+ 22 5
-+ 23 2
-+ 25 1
-+
-+
-+* Wakeup latency of a selected process
-+
-+To only collect wakeup latency data of a particular process, write the
-+PID of the requested process to
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/pid
-+
-+PIDs are not considered if this variable is set to 0.
-+
-+
-+* Details of the process with the highest wakeup latency so far
-+
-+Selected data of the process that suffered from the highest wakeup
-+latency that occurred in a particular CPU are available in the file
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
-+
-+In addition, other relevant system data at the time when the
-+latency occurred are given.
-+
-+The format of the data is (all in one line):
-+<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
-+<- <PID> <Priority> <Command> <Timestamp>
-+
-+The value of <Timeroffset> is only relevant in the combined timer
-+and wakeup latency recording. In the wakeup recording, it is
-+always 0, in the missed_timer_offsets recording, it is the same
-+as <Latency>.
-+
-+When retrospectively searching for the origin of a latency while
-+tracing was not enabled, it may be helpful to know the name and
-+some basic data of the task that (finally) was switching to the
-+late real-time task. In addition to the victim's data, also the
-+data of the possible culprit are therefore displayed after the
-+"<-" symbol.
-+
-+Finally, the timestamp of the time when the latency occurred
-+in <seconds>.<microseconds> after the most recent system boot
-+is provided.
-+
-+These data are also reset when the wakeup histogram is reset.
-diff -Nur linux-4.1.26.orig/drivers/acpi/acpica/acglobal.h linux-4.1.26/drivers/acpi/acpica/acglobal.h
---- linux-4.1.26.orig/drivers/acpi/acpica/acglobal.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/acpi/acpica/acglobal.h 2016-06-19 15:30:58.627295036 +0200
-@@ -112,7 +112,7 @@
+diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
+index a0c478784314..166ee955405f 100644
+--- a/drivers/acpi/acpica/acglobal.h
++++ b/drivers/acpi/acpica/acglobal.h
+@@ -112,7 +112,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending);
* interrupt level
*/
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
@@ -5347,10 +5975,11 @@ diff -Nur linux-4.1.26.orig/drivers/acpi/acpica/acglobal.h linux-4.1.26/drivers/
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
/* Mutex for _OSI support */
-diff -Nur linux-4.1.26.orig/drivers/acpi/acpica/hwregs.c linux-4.1.26/drivers/acpi/acpica/hwregs.c
---- linux-4.1.26.orig/drivers/acpi/acpica/hwregs.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/acpi/acpica/hwregs.c 2016-06-19 15:30:58.627295036 +0200
-@@ -269,14 +269,14 @@
+diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
+index 3cf77afd142c..dc32e72132f1 100644
+--- a/drivers/acpi/acpica/hwregs.c
++++ b/drivers/acpi/acpica/hwregs.c
+@@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(void)
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
@@ -5367,10 +5996,11 @@ diff -Nur linux-4.1.26.orig/drivers/acpi/acpica/hwregs.c linux-4.1.26/drivers/ac
if (ACPI_FAILURE(status)) {
goto exit;
-diff -Nur linux-4.1.26.orig/drivers/acpi/acpica/hwxface.c linux-4.1.26/drivers/acpi/acpica/hwxface.c
---- linux-4.1.26.orig/drivers/acpi/acpica/hwxface.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/acpi/acpica/hwxface.c 2016-06-19 15:30:58.627295036 +0200
-@@ -374,7 +374,7 @@
+diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
+index 5f97468df8ff..8c017f15da7d 100644
+--- a/drivers/acpi/acpica/hwxface.c
++++ b/drivers/acpi/acpica/hwxface.c
+@@ -374,7 +374,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -5379,7 +6009,7 @@ diff -Nur linux-4.1.26.orig/drivers/acpi/acpica/hwxface.c linux-4.1.26/drivers/a
/*
* At this point, we know that the parent register is one of the
-@@ -435,7 +435,7 @@
+@@ -435,7 +435,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
unlock_and_exit:
@@ -5388,10 +6018,11 @@ diff -Nur linux-4.1.26.orig/drivers/acpi/acpica/hwxface.c linux-4.1.26/drivers/a
return_ACPI_STATUS(status);
}
-diff -Nur linux-4.1.26.orig/drivers/acpi/acpica/utmutex.c linux-4.1.26/drivers/acpi/acpica/utmutex.c
---- linux-4.1.26.orig/drivers/acpi/acpica/utmutex.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/acpi/acpica/utmutex.c 2016-06-19 15:30:58.627295036 +0200
-@@ -88,7 +88,7 @@
+diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
+index 37b8b58fcd56..938795507d87 100644
+--- a/drivers/acpi/acpica/utmutex.c
++++ b/drivers/acpi/acpica/utmutex.c
+@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void)
return_ACPI_STATUS (status);
}
@@ -5400,7 +6031,7 @@ diff -Nur linux-4.1.26.orig/drivers/acpi/acpica/utmutex.c linux-4.1.26/drivers/a
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
-@@ -141,7 +141,7 @@
+@@ -141,7 +141,7 @@ void acpi_ut_mutex_terminate(void)
/* Delete the spinlocks */
acpi_os_delete_lock(acpi_gbl_gpe_lock);
@@ -5409,10 +6040,11 @@ diff -Nur linux-4.1.26.orig/drivers/acpi/acpica/utmutex.c linux-4.1.26/drivers/a
acpi_os_delete_lock(acpi_gbl_reference_count_lock);
/* Delete the reader/writer lock */
-diff -Nur linux-4.1.26.orig/drivers/ata/libata-sff.c linux-4.1.26/drivers/ata/libata-sff.c
---- linux-4.1.26.orig/drivers/ata/libata-sff.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/ata/libata-sff.c 2016-06-19 15:30:58.627295036 +0200
-@@ -678,9 +678,9 @@
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 7dbba387d12a..65beb7abb4e7 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
unsigned long flags;
unsigned int consumed;
@@ -5424,7 +6056,7 @@ diff -Nur linux-4.1.26.orig/drivers/ata/libata-sff.c linux-4.1.26/drivers/ata/li
return consumed;
}
-@@ -719,7 +719,7 @@
+@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
unsigned long flags;
/* FIXME: use a bounce buffer */
@@ -5433,7 +6065,7 @@ diff -Nur linux-4.1.26.orig/drivers/ata/libata-sff.c linux-4.1.26/drivers/ata/li
buf = kmap_atomic(page);
/* do the actual data transfer */
-@@ -727,7 +727,7 @@
+@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
do_write);
kunmap_atomic(buf);
@@ -5442,7 +6074,7 @@ diff -Nur linux-4.1.26.orig/drivers/ata/libata-sff.c linux-4.1.26/drivers/ata/li
} else {
buf = page_address(page);
ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
-@@ -864,7 +864,7 @@
+@@ -864,7 +864,7 @@ next_sg:
unsigned long flags;
/* FIXME: use bounce buffer */
@@ -5451,7 +6083,7 @@ diff -Nur linux-4.1.26.orig/drivers/ata/libata-sff.c linux-4.1.26/drivers/ata/li
buf = kmap_atomic(page);
/* do the actual data transfer */
-@@ -872,7 +872,7 @@
+@@ -872,7 +872,7 @@ next_sg:
count, rw);
kunmap_atomic(buf);
@@ -5460,10 +6092,175 @@ diff -Nur linux-4.1.26.orig/drivers/ata/libata-sff.c linux-4.1.26/drivers/ata/li
} else {
buf = page_address(page);
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
-diff -Nur linux-4.1.26.orig/drivers/char/random.c linux-4.1.26/drivers/char/random.c
---- linux-4.1.26.orig/drivers/char/random.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/char/random.c 2016-06-19 15:30:58.627295036 +0200
-@@ -776,8 +776,6 @@
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 6e134f4759c0..d2782d492630 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -386,6 +386,8 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+ goto out_error;
+ }
+
++ zram_meta_init_table_locks(meta, disksize);
++
+ return meta;
+
+ out_error:
+@@ -484,12 +486,12 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+ unsigned long handle;
+ size_t size;
+
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ handle = meta->table[index].handle;
+ size = zram_get_obj_size(meta, index);
+
+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ clear_page(mem);
+ return 0;
+ }
+@@ -500,7 +502,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+ else
+ ret = zcomp_decompress(zram->comp, cmem, size, mem);
+ zs_unmap_object(meta->mem_pool, handle);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret)) {
+@@ -520,14 +522,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+ struct zram_meta *meta = zram->meta;
+ page = bvec->bv_page;
+
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ if (unlikely(!meta->table[index].handle) ||
+ zram_test_flag(meta, index, ZRAM_ZERO)) {
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ handle_zero_page(bvec);
+ return 0;
+ }
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ if (is_partial_io(bvec))
+ /* Use a temporary buffer to decompress the page */
+@@ -622,10 +624,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ if (user_mem)
+ kunmap_atomic(user_mem);
+ /* Free memory associated with this sector now. */
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+ zram_set_flag(meta, index, ZRAM_ZERO);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ atomic64_inc(&zram->stats.zero_pages);
+ ret = 0;
+@@ -685,12 +687,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ * Free memory associated with this sector
+ * before overwriting unused sectors.
+ */
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+
+ meta->table[index].handle = handle;
+ zram_set_obj_size(meta, index, clen);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ /* Update stats */
+ atomic64_add(clen, &zram->stats.compr_data_size);
+@@ -762,9 +764,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
+ }
+
+ while (n >= PAGE_SIZE) {
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ atomic64_inc(&zram->stats.notify_free);
+ index++;
+ n -= PAGE_SIZE;
+@@ -1007,9 +1009,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
+ zram = bdev->bd_disk->private_data;
+ meta = zram->meta;
+
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ atomic64_inc(&zram->stats.notify_free);
+ }
+
+diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
+index 570c598f4ce9..22c0173b00e3 100644
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -78,6 +78,9 @@ enum zram_pageflags {
+ struct zram_table_entry {
+ unsigned long handle;
+ unsigned long value;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ spinlock_t lock;
++#endif
+ };
+
+ struct zram_stats {
+@@ -122,4 +125,42 @@ struct zram {
+ u64 disksize; /* bytes */
+ char compressor[10];
+ };
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++static inline void zram_lock_table(struct zram_table_entry *table)
++{
++ bit_spin_lock(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_unlock_table(struct zram_table_entry *table)
++{
++ bit_spin_unlock(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
++#else /* CONFIG_PREEMPT_RT_BASE */
++static inline void zram_lock_table(struct zram_table_entry *table)
++{
++ spin_lock(&table->lock);
++ __set_bit(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_unlock_table(struct zram_table_entry *table)
++{
++ __clear_bit(ZRAM_ACCESS, &table->value);
++ spin_unlock(&table->lock);
++}
++
++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
++{
++ size_t num_pages = disksize >> PAGE_SHIFT;
++ size_t index;
++
++ for (index = 0; index < num_pages; index++) {
++ spinlock_t *lock = &meta->table[index].lock;
++ spin_lock_init(lock);
++ }
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ #endif
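
Design note on the zram helpers above: on RT each table entry gains a real
spinlock_t, one per page of disksize, instead of the single ZRAM_ACCESS bit taken
with bit_spin_lock(). A bit spinlock spins with preemption disabled, which RT
forbids for potentially long-held locks; the per-entry lock keeps the section
preemptible at the cost of sizeof(spinlock_t) per entry, and still sets and clears
ZRAM_ACCESS under the lock so readers of table->value see the same flag either way.
The same pattern on a generic table, as a sketch:

struct entry_sketch {
	unsigned long value;
	spinlock_t lock;	/* replaces the bit spinlock on RT;
				 * spin_lock_init() at setup omitted */
};

static void update_entry_sketch(struct entry_sketch *e, unsigned long v)
{
	spin_lock(&e->lock);	/* preemptible lock on RT */
	e->value = v;
	spin_unlock(&e->lock);
}
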
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 9cd6968e2f92..eb47efec2506 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -776,8 +776,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
} sample;
long delta, delta2, delta3;
@@ -5472,7 +6269,7 @@ diff -Nur linux-4.1.26.orig/drivers/char/random.c linux-4.1.26/drivers/char/rand
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
-@@ -818,7 +816,6 @@
+@@ -818,7 +816,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
@@ -5480,7 +6277,7 @@ diff -Nur linux-4.1.26.orig/drivers/char/random.c linux-4.1.26/drivers/char/rand
}
void add_input_randomness(unsigned int type, unsigned int code,
-@@ -871,28 +868,27 @@
+@@ -871,28 +868,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
return *(ptr + f->reg_idx++);
}
@@ -5514,9 +6311,36 @@ diff -Nur linux-4.1.26.orig/drivers/char/random.c linux-4.1.26/drivers/char/rand
fast_mix(fast_pool);
add_interrupt_bench(cycles);
-diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/drivers/clocksource/tcb_clksrc.c
---- linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/clocksource/tcb_clksrc.c 2016-06-19 15:30:58.627295036 +0200
+diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
+index 3f27d21fb729..b83480f599ce 100644
+--- a/drivers/clk/at91/pmc.c
++++ b/drivers/clk/at91/pmc.c
+@@ -27,21 +27,6 @@
+ void __iomem *at91_pmc_base;
+ EXPORT_SYMBOL_GPL(at91_pmc_base);
+
+-void at91rm9200_idle(void)
+-{
+- /*
+- * Disable the processor clock. The processor will be automatically
+- * re-enabled by an interrupt or by a reset.
+- */
+- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
+-}
+-
+-void at91sam9_idle(void)
+-{
+- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
+- cpu_do_idle();
+-}
+-
+ int of_at91_get_clk_range(struct device_node *np, const char *propname,
+ struct clk_range *range)
+ {
+diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
+index 8bdbc45c6dad..43f1c6bc6e28 100644
+--- a/drivers/clocksource/tcb_clksrc.c
++++ b/drivers/clocksource/tcb_clksrc.c
@@ -23,8 +23,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
@@ -5527,7 +6351,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/driver
*
* A boot clocksource and clockevent source are also currently needed,
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
-@@ -74,6 +73,7 @@
+@@ -74,6 +73,7 @@ static struct clocksource clksrc = {
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
@@ -5535,7 +6359,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/driver
void __iomem *regs;
};
-@@ -82,13 +82,6 @@
+@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
@@ -5549,7 +6373,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/driver
static u32 timer_clock;
static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
-@@ -111,11 +104,12 @@
+@@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
case CLOCK_EVT_MODE_PERIODIC:
clk_enable(tcd->clk);
@@ -5564,7 +6388,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/driver
/* Enable clock and interrupts on RC compare */
__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -128,7 +122,7 @@
+@@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
case CLOCK_EVT_MODE_ONESHOT:
clk_enable(tcd->clk);
@@ -5573,7 +6397,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/driver
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
-@@ -157,8 +151,12 @@
+@@ -157,8 +151,12 @@ static struct tc_clkevt_device clkevt = {
.name = "tc_clkevt",
.features = CLOCK_EVT_FEAT_PERIODIC
| CLOCK_EVT_FEAT_ONESHOT,
@@ -5586,7 +6410,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/driver
.set_next_event = tc_next_event,
.set_mode = tc_mode,
},
-@@ -178,8 +176,9 @@
+@@ -178,8 +176,9 @@ static irqreturn_t ch2_irq(int irq, void *handle)
return IRQ_NONE;
}
@@ -5597,7 +6421,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/driver
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
-@@ -193,7 +192,11 @@
+@@ -193,7 +192,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
@@ -5610,7 +6434,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/driver
clkevt.clkevt.cpumask = cpumask_of(0);
-@@ -203,7 +206,7 @@
+@@ -203,7 +206,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
return ret;
}
@@ -5619,7 +6443,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/driver
return ret;
}
-@@ -340,7 +343,11 @@
+@@ -340,7 +343,11 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
@@ -5631,10 +6455,11 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.26/driver
if (ret)
goto err_unregister_clksrc;
-diff -Nur linux-4.1.26.orig/drivers/clocksource/timer-atmel-pit.c linux-4.1.26/drivers/clocksource/timer-atmel-pit.c
---- linux-4.1.26.orig/drivers/clocksource/timer-atmel-pit.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/clocksource/timer-atmel-pit.c 2016-06-19 15:30:58.627295036 +0200
-@@ -90,6 +90,7 @@
+diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
+index c0304ff608b0..6eb7bf435d9b 100644
+--- a/drivers/clocksource/timer-atmel-pit.c
++++ b/drivers/clocksource/timer-atmel-pit.c
+@@ -90,6 +90,7 @@ static cycle_t read_pit_clk(struct clocksource *cs)
return elapsed;
}
@@ -5642,7 +6467,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/timer-atmel-pit.c linux-4.1.26/d
/*
* Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
*/
-@@ -100,6 +101,8 @@
+@@ -100,6 +101,8 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -5651,7 +6476,7 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/timer-atmel-pit.c linux-4.1.26/d
/* update clocksource counter */
data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
pit_write(data->base, AT91_PIT_MR,
-@@ -113,6 +116,7 @@
+@@ -113,6 +116,7 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
/* disable irq, leaving the clocksource active */
pit_write(data->base, AT91_PIT_MR,
(data->cycle - 1) | AT91_PIT_PITEN);
@@ -5659,10 +6484,11 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/timer-atmel-pit.c linux-4.1.26/d
break;
case CLOCK_EVT_MODE_RESUME:
break;
-diff -Nur linux-4.1.26.orig/drivers/clocksource/timer-atmel-st.c linux-4.1.26/drivers/clocksource/timer-atmel-st.c
---- linux-4.1.26.orig/drivers/clocksource/timer-atmel-st.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/clocksource/timer-atmel-st.c 2016-06-19 15:30:58.627295036 +0200
-@@ -131,6 +131,7 @@
+diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
+index 1692e17e096b..306e2051f112 100644
+--- a/drivers/clocksource/timer-atmel-st.c
++++ b/drivers/clocksource/timer-atmel-st.c
+@@ -131,6 +131,7 @@ clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
@@ -5670,10 +6496,24 @@ diff -Nur linux-4.1.26.orig/drivers/clocksource/timer-atmel-st.c linux-4.1.26/dr
case CLOCK_EVT_MODE_RESUME:
irqmask = 0;
break;
-diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufreq/cpufreq.c
---- linux-4.1.26.orig/drivers/cpufreq/cpufreq.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/cpufreq/cpufreq.c 2016-06-19 15:30:58.627295036 +0200
-@@ -64,12 +64,6 @@
+diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
+index c59bdcb83217..8f23161d80be 100644
+--- a/drivers/cpufreq/Kconfig.x86
++++ b/drivers/cpufreq/Kconfig.x86
+@@ -123,7 +123,7 @@ config X86_POWERNOW_K7_ACPI
+
+ config X86_POWERNOW_K8
+ tristate "AMD Opteron/Athlon64 PowerNow!"
+- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
++ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
+ help
+ This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+ Support for K10 and newer processors is now in acpi-cpufreq.
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 8ae655c364f4..ce1d93e93d1a 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -64,12 +64,6 @@ static inline bool has_target(void)
return cpufreq_driver->target_index || cpufreq_driver->target;
}
@@ -5686,7 +6526,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
unsigned int event);
-@@ -215,9 +209,6 @@
+@@ -215,9 +209,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
if (cpu >= nr_cpu_ids)
return NULL;
@@ -5696,7 +6536,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
/* get the cpufreq driver */
read_lock_irqsave(&cpufreq_driver_lock, flags);
-@@ -230,9 +221,6 @@
+@@ -230,9 +221,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -5706,7 +6546,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
-@@ -240,7 +228,6 @@
+@@ -240,7 +228,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
kobject_put(&policy->kobj);
@@ -5714,7 +6554,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
-@@ -765,9 +752,6 @@
+@@ -765,9 +752,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct freq_attr *fattr = to_attr(attr);
ssize_t ret;
@@ -5724,7 +6564,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
down_read(&policy->rwsem);
if (fattr->show)
-@@ -776,7 +760,6 @@
+@@ -776,7 +760,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
ret = -EIO;
up_read(&policy->rwsem);
@@ -5732,7 +6572,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
return ret;
}
-@@ -793,9 +776,6 @@
+@@ -793,9 +776,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
if (!cpu_online(policy->cpu))
goto unlock;
@@ -5742,7 +6582,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
down_write(&policy->rwsem);
if (fattr->store)
-@@ -804,8 +784,6 @@
+@@ -804,8 +784,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
ret = -EIO;
up_write(&policy->rwsem);
@@ -5751,7 +6591,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
unlock:
put_online_cpus();
-@@ -1117,16 +1095,12 @@
+@@ -1117,16 +1095,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
if (unlikely(policy))
return 0;
@@ -5768,7 +6608,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
return ret;
}
}
-@@ -1269,8 +1243,6 @@
+@@ -1269,8 +1243,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
kobject_uevent(&policy->kobj, KOBJ_ADD);
@@ -5777,7 +6617,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
/* Callback for handling stuff after policy is ready */
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
-@@ -1304,8 +1276,6 @@
+@@ -1304,8 +1276,6 @@ err_set_policy_cpu:
cpufreq_policy_free(policy);
nomem_out:
@@ -5786,7 +6626,7 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
return ret;
}
-@@ -2499,19 +2469,20 @@
+@@ -2499,19 +2469,20 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
pr_debug("unregistering driver %s\n", driver->name);
@@ -5809,21 +6649,23 @@ diff -Nur linux-4.1.26.orig/drivers/cpufreq/cpufreq.c linux-4.1.26/drivers/cpufr
return 0;
}
-diff -Nur linux-4.1.26.orig/drivers/cpufreq/Kconfig.x86 linux-4.1.26/drivers/cpufreq/Kconfig.x86
---- linux-4.1.26.orig/drivers/cpufreq/Kconfig.x86 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/cpufreq/Kconfig.x86 2016-06-19 15:30:58.627295036 +0200
-@@ -123,7 +123,7 @@
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index c88b01bbf9a3..0fd82b872f63 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -309,7 +309,7 @@ config GPIO_OCTEON
+ family of SOCs.
- config X86_POWERNOW_K8
- tristate "AMD Opteron/Athlon64 PowerNow!"
-- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
-+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
- help
- This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
- Support for K10 and newer processors is now in acpi-cpufreq.
-diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/gpio-omap.c
---- linux-4.1.26.orig/drivers/gpio/gpio-omap.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/gpio/gpio-omap.c 2016-06-19 15:30:58.631295190 +0200
+ config GPIO_OMAP
+- bool "TI OMAP GPIO support" if COMPILE_TEST && !ARCH_OMAP2PLUS
++ tristate "TI OMAP GPIO support" if ARCH_OMAP2PLUS || COMPILE_TEST
+ default y if ARCH_OMAP
+ depends on ARM
+ select GENERIC_IRQ_CHIP
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index b232397ad7ec..4916fd726dce 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
@@ -29,6 +29,7 @@
#include <linux/platform_data/gpio-omap.h>
@@ -5832,7 +6674,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
static LIST_HEAD(omap_gpio_list);
-@@ -50,14 +51,15 @@
+@@ -50,14 +51,15 @@ struct gpio_regs {
struct gpio_bank {
struct list_head node;
void __iomem *base;
@@ -5850,7 +6692,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
struct gpio_chip chip;
struct clk *dbck;
u32 mod_usage;
-@@ -67,7 +69,7 @@
+@@ -67,7 +69,7 @@ struct gpio_bank {
struct device *dev;
bool is_mpuio;
bool dbck_flag;
@@ -5859,7 +6701,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
bool context_valid;
int stride;
u32 width;
-@@ -175,7 +177,7 @@
+@@ -175,7 +177,7 @@ static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set
static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
if (bank->dbck_enable_mask && !bank->dbck_enabled) {
@@ -5868,7 +6710,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
bank->dbck_enabled = true;
writel_relaxed(bank->dbck_enable_mask,
-@@ -193,7 +195,7 @@
+@@ -193,7 +195,7 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
*/
writel_relaxed(0, bank->base + bank->regs->debounce_en);
@@ -5877,7 +6719,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
bank->dbck_enabled = false;
}
}
-@@ -204,8 +206,9 @@
+@@ -204,8 +206,9 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
* @offset: the gpio number on this @bank
* @debounce: debounce time to use
*
@@ -5889,7 +6731,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
*/
static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
unsigned debounce)
-@@ -213,34 +216,33 @@
+@@ -213,34 +216,33 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
void __iomem *reg;
u32 val;
u32 l;
@@ -5932,7 +6774,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
/*
* Enable debounce clock per module.
* This call is mandatory because in omap_gpio_request() when
-@@ -285,7 +287,7 @@
+@@ -285,7 +287,7 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
bank->context.debounce = 0;
writel_relaxed(bank->context.debounce, bank->base +
bank->regs->debounce);
@@ -5941,7 +6783,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
bank->dbck_enabled = false;
}
}
-@@ -488,9 +490,6 @@
+@@ -488,9 +490,6 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
unsigned long flags;
unsigned offset = d->hwirq;
@@ -5951,7 +6793,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
-@@ -498,20 +497,28 @@
+@@ -498,20 +497,28 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
return -EINVAL;
@@ -5984,7 +6826,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
return retval;
}
-@@ -626,34 +633,30 @@
+@@ -626,34 +633,30 @@ static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
return -EINVAL;
}
@@ -6027,7 +6869,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
}
static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
-@@ -668,17 +671,10 @@
+@@ -668,17 +671,10 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
if (!BANK_USED(bank))
pm_runtime_get_sync(bank->dev);
@@ -6048,7 +6890,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
return 0;
}
-@@ -688,11 +684,14 @@
+@@ -688,11 +684,14 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
unsigned long flags;
@@ -6066,7 +6908,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
/*
* If this is the last gpio to be freed in the bank,
-@@ -711,29 +710,27 @@
+@@ -711,29 +710,27 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
* line's interrupt handler has been run, we may miss some nested
* interrupts.
*/
@@ -6104,7 +6946,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
enabled = omap_get_gpio_irqbank_mask(bank);
isr_saved = isr = readl_relaxed(isr_reg) & enabled;
-@@ -747,12 +744,7 @@
+@@ -747,12 +744,7 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
@@ -6118,7 +6960,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
if (!isr)
break;
-@@ -761,6 +753,7 @@
+@@ -761,6 +753,7 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
bit = __ffs(isr);
isr &= ~(BIT(bit));
@@ -6126,7 +6968,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
/*
* Some chips can't respond to both rising and falling
* at the same time. If this irq was requested with
-@@ -771,18 +764,20 @@
+@@ -771,18 +764,20 @@ static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
if (bank->toggle_mask & (BIT(bit)))
omap_toggle_gpio_edge_triggering(bank, bit);
@@ -6153,7 +6995,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
}
static unsigned int omap_gpio_irq_startup(struct irq_data *d)
-@@ -791,15 +786,22 @@
+@@ -791,15 +786,22 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d)
unsigned long flags;
unsigned offset = d->hwirq;
@@ -6181,7 +7023,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
}
static void omap_gpio_irq_shutdown(struct irq_data *d)
-@@ -808,11 +810,28 @@
+@@ -808,11 +810,28 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
unsigned long flags;
unsigned offset = d->hwirq;
@@ -6213,7 +7055,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
/*
* If this is the last IRQ to be freed in the bank,
-@@ -836,10 +855,10 @@
+@@ -836,10 +855,10 @@ static void omap_gpio_mask_irq(struct irq_data *d)
unsigned offset = d->hwirq;
unsigned long flags;
@@ -6226,7 +7068,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
}
static void omap_gpio_unmask_irq(struct irq_data *d)
-@@ -849,7 +868,7 @@
+@@ -849,7 +868,7 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
u32 trigger = irqd_get_trigger_type(d);
unsigned long flags;
@@ -6235,7 +7077,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
if (trigger)
omap_set_gpio_triggering(bank, offset, trigger);
-@@ -861,7 +880,7 @@
+@@ -861,7 +880,7 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
}
omap_set_gpio_irqenable(bank, offset, 1);
@@ -6244,7 +7086,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
}
/*---------------------------------------------------------------------*/
-@@ -874,9 +893,9 @@
+@@ -874,9 +893,9 @@ static int omap_mpuio_suspend_noirq(struct device *dev)
OMAP_MPUIO_GPIO_MASKIT / bank->stride;
unsigned long flags;
@@ -6256,7 +7098,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
return 0;
}
-@@ -889,9 +908,9 @@
+@@ -889,9 +908,9 @@ static int omap_mpuio_resume_noirq(struct device *dev)
OMAP_MPUIO_GPIO_MASKIT / bank->stride;
unsigned long flags;
@@ -6268,7 +7110,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
return 0;
}
-@@ -937,9 +956,9 @@
+@@ -937,9 +956,9 @@ static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
bank = container_of(chip, struct gpio_bank, chip);
reg = bank->base + bank->regs->direction;
@@ -6280,7 +7122,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
return dir;
}
-@@ -949,9 +968,9 @@
+@@ -949,9 +968,9 @@ static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
bank = container_of(chip, struct gpio_bank, chip);
@@ -6292,7 +7134,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
return 0;
}
-@@ -973,10 +992,10 @@
+@@ -973,10 +992,10 @@ static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flags;
bank = container_of(chip, struct gpio_bank, chip);
@@ -6305,7 +7147,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
return 0;
}
-@@ -988,9 +1007,9 @@
+@@ -988,9 +1007,9 @@ static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
bank = container_of(chip, struct gpio_bank, chip);
@@ -6317,7 +7159,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
return 0;
}
-@@ -1001,9 +1020,9 @@
+@@ -1001,9 +1020,9 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flags;
bank = container_of(chip, struct gpio_bank, chip);
@@ -6329,7 +7171,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
}
/*---------------------------------------------------------------------*/
-@@ -1048,10 +1067,6 @@
+@@ -1048,10 +1067,6 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
/* Initialize interface clk ungated, module enabled */
if (bank->regs->ctrl)
writel_relaxed(0, base + bank->regs->ctrl);
@@ -6340,7 +7182,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
}
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
-@@ -1080,7 +1095,6 @@
+@@ -1080,7 +1095,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
} else {
bank->chip.label = "gpio";
bank->chip.base = gpio;
@@ -6348,7 +7190,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
}
bank->chip.ngpio = bank->width;
-@@ -1090,6 +1104,9 @@
+@@ -1090,6 +1104,9 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
return ret;
}
@@ -6358,7 +7200,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
#ifdef CONFIG_ARCH_OMAP1
/*
* REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
-@@ -1112,7 +1129,7 @@
+@@ -1112,7 +1129,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
}
ret = gpiochip_irqchip_add(&bank->chip, irqc,
@@ -6367,7 +7209,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
IRQ_TYPE_NONE);
if (ret) {
-@@ -1121,10 +1138,14 @@
+@@ -1121,10 +1138,14 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
return -ENODEV;
}
@@ -6385,7 +7227,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
}
static const struct of_device_id omap_gpio_match[];
-@@ -1163,17 +1184,23 @@
+@@ -1163,17 +1184,23 @@ static int omap_gpio_probe(struct platform_device *pdev)
irqc->irq_unmask = omap_gpio_unmask_irq,
irqc->irq_set_type = omap_gpio_irq_type,
irqc->irq_set_wake = omap_gpio_wake_enable,
@@ -6414,7 +7256,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
bank->dbck_flag = pdata->dbck_flag;
bank->stride = pdata->bank_stride;
bank->width = pdata->bank_width;
-@@ -1183,15 +1210,9 @@
+@@ -1183,15 +1210,9 @@ static int omap_gpio_probe(struct platform_device *pdev)
#ifdef CONFIG_OF_GPIO
bank->chip.of_node = of_node_get(node);
#endif
@@ -6433,7 +7275,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
}
if (bank->regs->set_dataout && bank->regs->clr_dataout)
-@@ -1199,16 +1220,27 @@
+@@ -1199,16 +1220,27 @@ static int omap_gpio_probe(struct platform_device *pdev)
else
bank->set_dataout = omap_set_gpio_dataout_mask;
@@ -6463,7 +7305,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
platform_set_drvdata(pdev, bank);
pm_runtime_enable(bank->dev);
-@@ -1221,8 +1253,11 @@
+@@ -1221,8 +1253,11 @@ static int omap_gpio_probe(struct platform_device *pdev)
omap_gpio_mod_init(bank);
ret = omap_gpio_chip_init(bank, irqc);
@@ -6476,7 +7318,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
omap_gpio_show_rev(bank);
-@@ -1233,6 +1268,19 @@
+@@ -1233,6 +1268,19 @@ static int omap_gpio_probe(struct platform_device *pdev)
return 0;
}
@@ -6496,7 +7338,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
#ifdef CONFIG_ARCH_OMAP2PLUS
#if defined(CONFIG_PM)
-@@ -1246,7 +1294,7 @@
+@@ -1246,7 +1294,7 @@ static int omap_gpio_runtime_suspend(struct device *dev)
unsigned long flags;
u32 wake_low, wake_hi;
@@ -6505,7 +7347,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
/*
* Only edges can generate a wakeup event to the PRCM.
-@@ -1299,7 +1347,7 @@
+@@ -1299,7 +1347,7 @@ update_gpio_context_count:
bank->get_context_loss_count(bank->dev);
omap_gpio_dbck_disable(bank);
@@ -6514,7 +7356,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
return 0;
}
-@@ -1314,14 +1362,14 @@
+@@ -1314,14 +1362,14 @@ static int omap_gpio_runtime_resume(struct device *dev)
unsigned long flags;
int c;
@@ -6531,7 +7373,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
omap_gpio_init_context(bank);
if (bank->get_context_loss_count)
-@@ -1342,22 +1390,20 @@
+@@ -1342,22 +1390,20 @@ static int omap_gpio_runtime_resume(struct device *dev)
writel_relaxed(bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
@@ -6562,7 +7404,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
return 0;
}
-@@ -1412,18 +1458,19 @@
+@@ -1412,18 +1458,19 @@ static int omap_gpio_runtime_resume(struct device *dev)
}
bank->workaround_enabled = false;
@@ -6584,7 +7426,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
continue;
bank->power_mode = pwr_mode;
-@@ -1437,12 +1484,13 @@
+@@ -1437,12 +1484,13 @@ void omap2_gpio_resume_after_idle(void)
struct gpio_bank *bank;
list_for_each_entry(bank, &omap_gpio_list, node) {
@@ -6599,7 +7441,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
#if defined(CONFIG_PM)
static void omap_gpio_init_context(struct gpio_bank *p)
-@@ -1598,6 +1646,7 @@
+@@ -1598,6 +1646,7 @@ MODULE_DEVICE_TABLE(of, omap_gpio_match);
static struct platform_driver omap_gpio_driver = {
.probe = omap_gpio_probe,
@@ -6607,7 +7449,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
.driver = {
.name = "omap_gpio",
.pm = &gpio_pm_ops,
-@@ -1615,3 +1664,13 @@
+@@ -1615,3 +1664,13 @@ static int __init omap_gpio_drv_reg(void)
return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);
@@ -6621,21 +7463,10 @@ diff -Nur linux-4.1.26.orig/drivers/gpio/gpio-omap.c linux-4.1.26/drivers/gpio/g
+MODULE_DESCRIPTION("omap gpio driver");
+MODULE_ALIAS("platform:gpio-omap");
+MODULE_LICENSE("GPL v2");
-diff -Nur linux-4.1.26.orig/drivers/gpio/Kconfig linux-4.1.26/drivers/gpio/Kconfig
---- linux-4.1.26.orig/drivers/gpio/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/gpio/Kconfig 2016-06-19 15:30:58.631295190 +0200
-@@ -308,7 +308,7 @@
- family of SOCs.
-
- config GPIO_OMAP
-- bool "TI OMAP GPIO support" if COMPILE_TEST && !ARCH_OMAP2PLUS
-+ tristate "TI OMAP GPIO support" if ARCH_OMAP2PLUS || COMPILE_TEST
- default y if ARCH_OMAP
- depends on ARM
- select GENERIC_IRQ_CHIP
-diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.1.26/drivers/gpu/drm/i915/i915_gem_execbuffer.c
---- linux-4.1.26.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2016-06-19 15:30:58.631295190 +0200
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 479024a4caad..a67a351e88ab 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
#include "i915_trace.h"
#include "intel_drv.h"
@@ -6644,7 +7475,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.1
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-@@ -465,7 +466,7 @@
+@@ -465,7 +466,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
/* We can't wait for rendering with pagefaults disabled */
@@ -6653,7 +7484,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.1
return -EFAULT;
if (use_cpu_reloc(obj))
-@@ -1338,7 +1339,9 @@
+@@ -1338,7 +1339,9 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
return ret;
}
@@ -6663,22 +7494,24 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.1
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c linux-4.1.26/drivers/gpu/drm/i915/i915_gem_shrinker.c
---- linux-4.1.26.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/gpu/drm/i915/i915_gem_shrinker.c 2016-06-19 15:30:58.631295190 +0200
-@@ -39,7 +39,7 @@
+diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+index 7ab9cc456dd2..e06515f4eb7c 100644
+--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+@@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
if (!mutex_is_locked(mutex))
return false;
--#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
-+#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
+-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
++#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
-diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/i915_irq.c linux-4.1.26/drivers/gpu/drm/i915/i915_irq.c
---- linux-4.1.26.orig/drivers/gpu/drm/i915/i915_irq.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/gpu/drm/i915/i915_irq.c 2016-06-19 15:30:58.631295190 +0200
-@@ -676,6 +676,7 @@
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index b0df8d10482a..8d34df020842 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -676,6 +676,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -6686,7 +7519,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/i915_irq.c linux-4.1.26/drivers
/* Get optional system timestamp before query. */
if (stime)
-@@ -727,6 +728,7 @@
+@@ -727,6 +728,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
@@ -6694,10 +7527,11 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/i915_irq.c linux-4.1.26/drivers
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/intel_display.c linux-4.1.26/drivers/gpu/drm/i915/intel_display.c
---- linux-4.1.26.orig/drivers/gpu/drm/i915/intel_display.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/gpu/drm/i915/intel_display.c 2016-06-19 15:30:58.635295344 +0200
-@@ -10084,7 +10084,7 @@
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 56323732c748..d1f1781392bf 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -10084,7 +10084,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -6706,9 +7540,10 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/intel_display.c linux-4.1.26/dr
if (crtc == NULL)
return;
-diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/intel_sprite.c linux-4.1.26/drivers/gpu/drm/i915/intel_sprite.c
---- linux-4.1.26.orig/drivers/gpu/drm/i915/intel_sprite.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/gpu/drm/i915/intel_sprite.c 2016-06-19 15:30:58.635295344 +0200
+diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
+index a4c0a04b5044..6da459fe20b2 100644
+--- a/drivers/gpu/drm/i915/intel_sprite.c
++++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -37,6 +37,7 @@
#include "intel_drv.h"
#include <drm/i915_drm.h>
@@ -6717,7 +7552,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/intel_sprite.c linux-4.1.26/dri
static bool
format_is_yuv(uint32_t format)
-@@ -61,6 +62,8 @@
+@@ -61,6 +62,8 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
}
@@ -6726,7 +7561,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/intel_sprite.c linux-4.1.26/dri
/**
* intel_pipe_update_start() - start update of a set of display registers
* @crtc: the crtc of which the registers are going to be updated
-@@ -101,7 +104,7 @@
+@@ -101,7 +104,7 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
return false;
@@ -6735,7 +7570,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/intel_sprite.c linux-4.1.26/dri
trace_i915_pipe_update_start(crtc, min, max);
-@@ -123,11 +126,11 @@
+@@ -123,11 +126,11 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
break;
}
@@ -6749,7 +7584,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/intel_sprite.c linux-4.1.26/dri
}
finish_wait(wq, &wait);
-@@ -158,7 +161,7 @@
+@@ -158,7 +161,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
trace_i915_pipe_update_end(crtc, end_vbl_count);
@@ -6758,10 +7593,11 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/i915/intel_sprite.c linux-4.1.26/dri
if (start_vbl_count != end_vbl_count)
DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
-diff -Nur linux-4.1.26.orig/drivers/gpu/drm/radeon/radeon_display.c linux-4.1.26/drivers/gpu/drm/radeon/radeon_display.c
---- linux-4.1.26.orig/drivers/gpu/drm/radeon/radeon_display.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/gpu/drm/radeon/radeon_display.c 2016-06-19 15:30:58.635295344 +0200
-@@ -1798,6 +1798,7 @@
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 6743174acdbc..8ad198bbc24d 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1798,6 +1798,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
struct radeon_device *rdev = dev->dev_private;
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -6769,7 +7605,7 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/radeon/radeon_display.c linux-4.1.26
/* Get optional system timestamp before query. */
if (stime)
-@@ -1890,6 +1891,7 @@
+@@ -1890,6 +1891,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
@@ -6777,10 +7613,11 @@ diff -Nur linux-4.1.26.orig/drivers/gpu/drm/radeon/radeon_display.c linux-4.1.26
/* Decode into vertical and horizontal scanout position. */
*vpos = position & 0x1fff;
-diff -Nur linux-4.1.26.orig/drivers/i2c/busses/i2c-omap.c linux-4.1.26/drivers/i2c/busses/i2c-omap.c
---- linux-4.1.26.orig/drivers/i2c/busses/i2c-omap.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/i2c/busses/i2c-omap.c 2016-06-19 15:30:58.635295344 +0200
-@@ -996,15 +996,12 @@
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 0e894193accf..2f9de5ecb6ed 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -996,15 +996,12 @@ omap_i2c_isr(int irq, void *dev_id)
u16 mask;
u16 stat;
@@ -6797,10 +7634,11 @@ diff -Nur linux-4.1.26.orig/drivers/i2c/busses/i2c-omap.c linux-4.1.26/drivers/i
return ret;
}
-diff -Nur linux-4.1.26.orig/drivers/ide/alim15x3.c linux-4.1.26/drivers/ide/alim15x3.c
---- linux-4.1.26.orig/drivers/ide/alim15x3.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/ide/alim15x3.c 2016-06-19 15:30:58.635295344 +0200
-@@ -234,7 +234,7 @@
+diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
+index 36f76e28a0bf..394f142f90c7 100644
+--- a/drivers/ide/alim15x3.c
++++ b/drivers/ide/alim15x3.c
+@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
@@ -6809,7 +7647,7 @@ diff -Nur linux-4.1.26.orig/drivers/ide/alim15x3.c linux-4.1.26/drivers/ide/alim
if (m5229_revision < 0xC2) {
/*
-@@ -325,7 +325,7 @@
+@@ -325,7 +325,7 @@ out:
}
pci_dev_put(north);
pci_dev_put(isa_dev);
@@ -6818,10 +7656,11 @@ diff -Nur linux-4.1.26.orig/drivers/ide/alim15x3.c linux-4.1.26/drivers/ide/alim
return 0;
}
-diff -Nur linux-4.1.26.orig/drivers/ide/hpt366.c linux-4.1.26/drivers/ide/hpt366.c
---- linux-4.1.26.orig/drivers/ide/hpt366.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/ide/hpt366.c 2016-06-19 15:30:58.635295344 +0200
-@@ -1241,7 +1241,7 @@
+diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
+index 696b6c1ec940..0d0a96629b73 100644
+--- a/drivers/ide/hpt366.c
++++ b/drivers/ide/hpt366.c
+@@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
dma_old = inb(base + 2);
@@ -6830,7 +7669,7 @@ diff -Nur linux-4.1.26.orig/drivers/ide/hpt366.c linux-4.1.26/drivers/ide/hpt366
dma_new = dma_old;
pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
-@@ -1252,7 +1252,7 @@
+@@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
if (dma_new != dma_old)
outb(dma_new, base + 2);
@@ -6839,40 +7678,11 @@ diff -Nur linux-4.1.26.orig/drivers/ide/hpt366.c linux-4.1.26/drivers/ide/hpt366
printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
hwif->name, base, base + 7);
-diff -Nur linux-4.1.26.orig/drivers/ide/ide-io.c linux-4.1.26/drivers/ide/ide-io.c
---- linux-4.1.26.orig/drivers/ide/ide-io.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/ide/ide-io.c 2016-06-19 15:30:58.635295344 +0200
-@@ -659,7 +659,7 @@
- /* disable_irq_nosync ?? */
- disable_irq(hwif->irq);
- /* local CPU only, as if we were handling an interrupt */
-- local_irq_disable();
-+ local_irq_disable_nort();
- if (hwif->polling) {
- startstop = handler(drive);
- } else if (drive_is_ready(drive)) {
-diff -Nur linux-4.1.26.orig/drivers/ide/ide-iops.c linux-4.1.26/drivers/ide/ide-iops.c
---- linux-4.1.26.orig/drivers/ide/ide-iops.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/ide/ide-iops.c 2016-06-19 15:30:58.635295344 +0200
-@@ -129,12 +129,12 @@
- if ((stat & ATA_BUSY) == 0)
- break;
-
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- *rstat = stat;
- return -EBUSY;
- }
- }
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
- /*
- * Allow status to settle, then read it again.
-diff -Nur linux-4.1.26.orig/drivers/ide/ide-io-std.c linux-4.1.26/drivers/ide/ide-io-std.c
---- linux-4.1.26.orig/drivers/ide/ide-io-std.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/ide/ide-io-std.c 2016-06-19 15:30:58.635295344 +0200
-@@ -175,7 +175,7 @@
+diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
+index 19763977568c..4169433faab5 100644
+--- a/drivers/ide/ide-io-std.c
++++ b/drivers/ide/ide-io-std.c
+@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
@@ -6881,7 +7691,7 @@ diff -Nur linux-4.1.26.orig/drivers/ide/ide-io-std.c linux-4.1.26/drivers/ide/id
ata_vlb_sync(io_ports->nsect_addr);
}
-@@ -186,7 +186,7 @@
+@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
insl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
@@ -6890,7 +7700,7 @@ diff -Nur linux-4.1.26.orig/drivers/ide/ide-io-std.c linux-4.1.26/drivers/ide/id
if (((len + 1) & 3) < 2)
return;
-@@ -219,7 +219,7 @@
+@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
@@ -6899,7 +7709,7 @@ diff -Nur linux-4.1.26.orig/drivers/ide/ide-io-std.c linux-4.1.26/drivers/ide/id
ata_vlb_sync(io_ports->nsect_addr);
}
-@@ -230,7 +230,7 @@
+@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
outsl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
@@ -6908,10 +7718,43 @@ diff -Nur linux-4.1.26.orig/drivers/ide/ide-io-std.c linux-4.1.26/drivers/ide/id
if (((len + 1) & 3) < 2)
return;
-diff -Nur linux-4.1.26.orig/drivers/ide/ide-probe.c linux-4.1.26/drivers/ide/ide-probe.c
---- linux-4.1.26.orig/drivers/ide/ide-probe.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/ide/ide-probe.c 2016-06-19 15:30:58.635295344 +0200
-@@ -196,10 +196,10 @@
+diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
+index 177db6d5b2f5..079ae6bebf18 100644
+--- a/drivers/ide/ide-io.c
++++ b/drivers/ide/ide-io.c
+@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long data)
+ /* disable_irq_nosync ?? */
+ disable_irq(hwif->irq);
+ /* local CPU only, as if we were handling an interrupt */
+- local_irq_disable();
++ local_irq_disable_nort();
+ if (hwif->polling) {
+ startstop = handler(drive);
+ } else if (drive_is_ready(drive)) {
+diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
+index 376f2dc410c5..f014dd1b73dc 100644
+--- a/drivers/ide/ide-iops.c
++++ b/drivers/ide/ide-iops.c
+@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
+ if ((stat & ATA_BUSY) == 0)
+ break;
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ *rstat = stat;
+ return -EBUSY;
+ }
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ /*
+ * Allow status to settle, then read it again.
+diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
+index 0b63facd1d87..4ceba37afc0c 100644
+--- a/drivers/ide/ide-probe.c
++++ b/drivers/ide/ide-probe.c
+@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
int bswap = 1;
/* local CPU only; some systems need this */
@@ -6924,10 +7767,11 @@ diff -Nur linux-4.1.26.orig/drivers/ide/ide-probe.c linux-4.1.26/drivers/ide/ide
drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
-diff -Nur linux-4.1.26.orig/drivers/ide/ide-taskfile.c linux-4.1.26/drivers/ide/ide-taskfile.c
---- linux-4.1.26.orig/drivers/ide/ide-taskfile.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/ide/ide-taskfile.c 2016-06-19 15:30:58.635295344 +0200
-@@ -250,7 +250,7 @@
+diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
+index dabb88b1cbec..2cecea72520a 100644
+--- a/drivers/ide/ide-taskfile.c
++++ b/drivers/ide/ide-taskfile.c
+@@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
page_is_high = PageHighMem(page);
if (page_is_high)
@@ -6936,7 +7780,7 @@ diff -Nur linux-4.1.26.orig/drivers/ide/ide-taskfile.c linux-4.1.26/drivers/ide/
buf = kmap_atomic(page) + offset;
-@@ -271,7 +271,7 @@
+@@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
kunmap_atomic(buf);
if (page_is_high)
@@ -6945,7 +7789,7 @@ diff -Nur linux-4.1.26.orig/drivers/ide/ide-taskfile.c linux-4.1.26/drivers/ide/
len -= nr_bytes;
}
-@@ -414,7 +414,7 @@
+@@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
}
if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
@@ -6954,10 +7798,11 @@ diff -Nur linux-4.1.26.orig/drivers/ide/ide-taskfile.c linux-4.1.26/drivers/ide/
ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
-diff -Nur linux-4.1.26.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-4.1.26/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
---- linux-4.1.26.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2016-06-19 15:30:58.639295498 +0200
-@@ -821,7 +821,7 @@
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index 0d23e0568deb..140c94ce71c5 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -821,7 +821,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "restarting multicast task\n");
@@ -6966,7 +7811,7 @@ diff -Nur linux-4.1.26.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux
netif_addr_lock(dev);
spin_lock(&priv->lock);
-@@ -903,7 +903,7 @@
+@@ -903,7 +903,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
spin_unlock(&priv->lock);
netif_addr_unlock(dev);
@@ -6975,10 +7820,11 @@ diff -Nur linux-4.1.26.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux
/*
* make sure the in-flight joins have finished before we attempt
-diff -Nur linux-4.1.26.orig/drivers/input/gameport/gameport.c linux-4.1.26/drivers/input/gameport/gameport.c
---- linux-4.1.26.orig/drivers/input/gameport/gameport.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/input/gameport/gameport.c 2016-06-19 15:30:58.639295498 +0200
-@@ -124,12 +124,12 @@
+diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
+index e853a2134680..5b6aa39a1de7 100644
+--- a/drivers/input/gameport/gameport.c
++++ b/drivers/input/gameport/gameport.c
+@@ -124,12 +124,12 @@ static int old_gameport_measure_speed(struct gameport *gameport)
tx = 1 << 30;
for(i = 0; i < 50; i++) {
@@ -6993,7 +7839,7 @@ diff -Nur linux-4.1.26.orig/drivers/input/gameport/gameport.c linux-4.1.26/drive
udelay(i * 10);
if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
}
-@@ -148,11 +148,11 @@
+@@ -148,11 +148,11 @@ static int old_gameport_measure_speed(struct gameport *gameport)
tx = 1 << 30;
for(i = 0; i < 50; i++) {
@@ -7007,10 +7853,11 @@ diff -Nur linux-4.1.26.orig/drivers/input/gameport/gameport.c linux-4.1.26/drive
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
}
-diff -Nur linux-4.1.26.orig/drivers/leds/trigger/Kconfig linux-4.1.26/drivers/leds/trigger/Kconfig
---- linux-4.1.26.orig/drivers/leds/trigger/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/leds/trigger/Kconfig 2016-06-19 15:30:58.639295498 +0200
-@@ -61,7 +61,7 @@
+diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
+index 49794b47b51c..3d7245d6b2f8 100644
+--- a/drivers/leds/trigger/Kconfig
++++ b/drivers/leds/trigger/Kconfig
+@@ -61,7 +61,7 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
@@ -7019,9 +7866,10 @@ diff -Nur linux-4.1.26.orig/drivers/leds/trigger/Kconfig linux-4.1.26/drivers/le
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
-diff -Nur linux-4.1.26.orig/drivers/md/bcache/Kconfig linux-4.1.26/drivers/md/bcache/Kconfig
---- linux-4.1.26.orig/drivers/md/bcache/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/md/bcache/Kconfig 2016-06-19 15:30:58.639295498 +0200
+diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
+index 4d200883c505..98b64ed5cb81 100644
+--- a/drivers/md/bcache/Kconfig
++++ b/drivers/md/bcache/Kconfig
@@ -1,6 +1,7 @@
config BCACHE
@@ -7030,10 +7878,11 @@ diff -Nur linux-4.1.26.orig/drivers/md/bcache/Kconfig linux-4.1.26/drivers/md/bc
---help---
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
-diff -Nur linux-4.1.26.orig/drivers/md/dm.c linux-4.1.26/drivers/md/dm.c
---- linux-4.1.26.orig/drivers/md/dm.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/md/dm.c 2016-06-19 15:30:58.639295498 +0200
-@@ -2133,7 +2133,7 @@
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 87de9a0848b7..86f64c13ccf6 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2141,7 +2141,7 @@ static void dm_request_fn(struct request_queue *q)
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
queue_kthread_work(&md->kworker, &tio->work);
@@ -7042,10 +7891,11 @@ diff -Nur linux-4.1.26.orig/drivers/md/dm.c linux-4.1.26/drivers/md/dm.c
}
goto out;
-diff -Nur linux-4.1.26.orig/drivers/md/raid5.c linux-4.1.26/drivers/md/raid5.c
---- linux-4.1.26.orig/drivers/md/raid5.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/md/raid5.c 2016-06-19 15:30:58.639295498 +0200
-@@ -1918,8 +1918,9 @@
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index ef0a99a3a779..4e60997ef19a 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1918,8 +1918,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -7056,7 +7906,7 @@ diff -Nur linux-4.1.26.orig/drivers/md/raid5.c linux-4.1.26/drivers/md/raid5.c
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -1975,7 +1976,8 @@
+@@ -1975,7 +1976,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -7066,7 +7916,7 @@ diff -Nur linux-4.1.26.orig/drivers/md/raid5.c linux-4.1.26/drivers/md/raid5.c
}
static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
-@@ -6375,6 +6377,7 @@
+@@ -6375,6 +6377,7 @@ static int raid5_alloc_percpu(struct r5conf *conf)
__func__, cpu);
break;
}
@@ -7074,10 +7924,11 @@ diff -Nur linux-4.1.26.orig/drivers/md/raid5.c linux-4.1.26/drivers/md/raid5.c
}
put_online_cpus();
-diff -Nur linux-4.1.26.orig/drivers/md/raid5.h linux-4.1.26/drivers/md/raid5.h
---- linux-4.1.26.orig/drivers/md/raid5.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/md/raid5.h 2016-06-19 15:30:58.639295498 +0200
-@@ -495,6 +495,7 @@
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index d31ed93bb8a9..82fc623bf0b0 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -495,6 +495,7 @@ struct r5conf {
int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
@@ -7085,9 +7936,97 @@ diff -Nur linux-4.1.26.orig/drivers/md/raid5.h linux-4.1.26/drivers/md/raid5.h
struct page *spare_page; /* Used when checking P/Q in raid6 */
struct flex_array *scribble; /* space for constructing buffer
* lists and performing address
-diff -Nur linux-4.1.26.orig/drivers/misc/hwlat_detector.c linux-4.1.26/drivers/misc/hwlat_detector.c
---- linux-4.1.26.orig/drivers/misc/hwlat_detector.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/drivers/misc/hwlat_detector.c 2016-06-19 15:30:58.643295653 +0200
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index b3c10b7dae1f..b9d7f076f2f8 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -54,6 +54,7 @@ config AD525X_DPOT_SPI
+ config ATMEL_TCLIB
+ bool "Atmel AT32/AT91 Timer/Counter Library"
+ depends on (AVR32 || ARCH_AT91)
++ default y if PREEMPT_RT_FULL
+ help
+ Select this if you want a library to allocate the Timer/Counter
+ blocks found on many Atmel processors. This facilitates using
+@@ -69,8 +70,7 @@ config ATMEL_TCB_CLKSRC
+ are combined to make a single 32-bit timer.
+
+ When GENERIC_CLOCKEVENTS is defined, the third timer channel
+- may be used as a clock event device supporting oneshot mode
+- (delays of up to two seconds) based on the 32 KiHz clock.
++ may be used as a clock event device supporting oneshot mode.
+
+ config ATMEL_TCB_CLKSRC_BLOCK
+ int
+@@ -84,6 +84,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
+ TC can be used for other purposes, such as PWM generation and
+ interval timing.
+
++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
++ bool "TC Block use 32 KiHz clock"
++ depends on ATMEL_TCB_CLKSRC
++ default y if !PREEMPT_RT_FULL
++ help
++ Select this to use 32 KiHz base clock rate as TC block clock
++ source for clock events.
++
++
+ config DUMMY_IRQ
+ tristate "Dummy IRQ handler"
+ default n
+@@ -113,6 +122,35 @@ config IBM_ASM
+ for information on the specific driver level and support statement
+ for your IBM server.
+
++config HWLAT_DETECTOR
++ tristate "Testing module to detect hardware-induced latencies"
++ depends on DEBUG_FS
++ depends on RING_BUFFER
++ default m
++ ---help---
++ A simple hardware latency detector. Use this module to detect
++ large latencies introduced by the behavior of the underlying
++ system firmware external to Linux. We do this using periodic
++ use of stop_machine to grab all available CPUs and measure
++ for unexplainable gaps in the CPU timestamp counter(s). By
++ default, the module is not enabled until the "enable" file
++ within the "hwlat_detector" debugfs directory is toggled.
++
++ This module is often used to detect SMI (System Management
++	  Interrupts) on x86 systems, though it is not x86 specific. To
++ this end, we default to using a sample window of 1 second,
++ during which we will sample for 0.5 seconds. If an SMI or
++ similar event occurs during that time, it is recorded
++	  into an 8K samples global ring buffer until retrieved.
++
++ WARNING: This software should never be enabled (it can be built
++ but should not be turned on after it is loaded) in a production
++ environment where high latencies are a concern since the
++ sampling mechanism actually introduces latencies for
++ regular tasks while the CPU(s) are being held.
++
++ If unsure, say N
++
+ config PHANTOM
+ tristate "Sensable PHANToM (PCI)"
+ depends on PCI
+diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
+index 7d5c4cd118c4..6a8e39388cf9 100644
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -38,6 +38,7 @@ obj-$(CONFIG_C2PORT) += c2port/
+ obj-$(CONFIG_HMC6352) += hmc6352.o
+ obj-y += eeprom/
+ obj-y += cb710/
++obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
+ obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
+ obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
+ obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
+diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c
+new file mode 100644
+index 000000000000..2429c4331e68
+--- /dev/null
++++ b/drivers/misc/hwlat_detector.c
@@ -0,0 +1,1240 @@
+/*
+ * hwlat_detector.c - A simple Hardware Latency detector.
@@ -8329,94 +9268,11 @@ diff -Nur linux-4.1.26.orig/drivers/misc/hwlat_detector.c linux-4.1.26/drivers/m
+
+module_init(detector_init);
+module_exit(detector_exit);
-diff -Nur linux-4.1.26.orig/drivers/misc/Kconfig linux-4.1.26/drivers/misc/Kconfig
---- linux-4.1.26.orig/drivers/misc/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/misc/Kconfig 2016-06-19 15:30:58.639295498 +0200
-@@ -54,6 +54,7 @@
- config ATMEL_TCLIB
- bool "Atmel AT32/AT91 Timer/Counter Library"
- depends on (AVR32 || ARCH_AT91)
-+ default y if PREEMPT_RT_FULL
- help
- Select this if you want a library to allocate the Timer/Counter
- blocks found on many Atmel processors. This facilitates using
-@@ -69,8 +70,7 @@
- are combined to make a single 32-bit timer.
-
- When GENERIC_CLOCKEVENTS is defined, the third timer channel
-- may be used as a clock event device supporting oneshot mode
-- (delays of up to two seconds) based on the 32 KiHz clock.
-+ may be used as a clock event device supporting oneshot mode.
-
- config ATMEL_TCB_CLKSRC_BLOCK
- int
-@@ -84,6 +84,15 @@
- TC can be used for other purposes, such as PWM generation and
- interval timing.
-
-+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
-+ bool "TC Block use 32 KiHz clock"
-+ depends on ATMEL_TCB_CLKSRC
-+ default y if !PREEMPT_RT_FULL
-+ help
-+ Select this to use 32 KiHz base clock rate as TC block clock
-+ source for clock events.
-+
-+
- config DUMMY_IRQ
- tristate "Dummy IRQ handler"
- default n
-@@ -113,6 +122,35 @@
- for information on the specific driver level and support statement
- for your IBM server.
-
-+config HWLAT_DETECTOR
-+ tristate "Testing module to detect hardware-induced latencies"
-+ depends on DEBUG_FS
-+ depends on RING_BUFFER
-+ default m
-+ ---help---
-+ A simple hardware latency detector. Use this module to detect
-+ large latencies introduced by the behavior of the underlying
-+ system firmware external to Linux. We do this using periodic
-+ use of stop_machine to grab all available CPUs and measure
-+ for unexplainable gaps in the CPU timestamp counter(s). By
-+ default, the module is not enabled until the "enable" file
-+ within the "hwlat_detector" debugfs directory is toggled.
-+
-+ This module is often used to detect SMI (System Management
-+ Interrupts) on x86 systems, though is not x86 specific. To
-+ this end, we default to using a sample window of 1 second,
-+ during which we will sample for 0.5 seconds. If an SMI or
-+ similar event occurs during that time, it is recorded
-+ into an 8K samples global ring buffer until retreived.
-+
-+ WARNING: This software should never be enabled (it can be built
-+ but should not be turned on after it is loaded) in a production
-+ environment where high latencies are a concern since the
-+ sampling mechanism actually introduces latencies for
-+ regular tasks while the CPU(s) are being held.
-+
-+ If unsure, say N
-+
- config PHANTOM
- tristate "Sensable PHANToM (PCI)"
- depends on PCI
-diff -Nur linux-4.1.26.orig/drivers/misc/Makefile linux-4.1.26/drivers/misc/Makefile
---- linux-4.1.26.orig/drivers/misc/Makefile 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/misc/Makefile 2016-06-19 15:30:58.639295498 +0200
-@@ -38,6 +38,7 @@
- obj-$(CONFIG_HMC6352) += hmc6352.o
- obj-y += eeprom/
- obj-y += cb710/
-+obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
- obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
- obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
- obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
-diff -Nur linux-4.1.26.orig/drivers/mmc/host/mmci.c linux-4.1.26/drivers/mmc/host/mmci.c
---- linux-4.1.26.orig/drivers/mmc/host/mmci.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/mmc/host/mmci.c 2016-06-19 15:30:58.643295653 +0200
-@@ -1155,15 +1155,12 @@
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index acece3299756..58ea04a03fa9 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1155,15 +1155,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
struct sg_mapping_iter *sg_miter = &host->sg_miter;
struct variant_data *variant = host->variant;
void __iomem *base = host->base;
@@ -8432,7 +9288,7 @@ diff -Nur linux-4.1.26.orig/drivers/mmc/host/mmci.c linux-4.1.26/drivers/mmc/hos
do {
unsigned int remain, len;
char *buffer;
-@@ -1203,8 +1200,6 @@
+@@ -1203,8 +1200,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
sg_miter_stop(sg_miter);
@@ -8441,10 +9297,11 @@ diff -Nur linux-4.1.26.orig/drivers/mmc/host/mmci.c linux-4.1.26/drivers/mmc/hos
/*
* If we have less than the fifo 'half-full' threshold to transfer,
* trigger a PIO interrupt as soon as any data is available.
-diff -Nur linux-4.1.26.orig/drivers/net/ethernet/3com/3c59x.c linux-4.1.26/drivers/net/ethernet/3com/3c59x.c
---- linux-4.1.26.orig/drivers/net/ethernet/3com/3c59x.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/ethernet/3com/3c59x.c 2016-06-19 15:30:58.643295653 +0200
-@@ -842,9 +842,9 @@
+diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
+index 41095ebad97f..b0a0cb22aec4 100644
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -842,9 +842,9 @@ static void poll_vortex(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
unsigned long flags;
@@ -8456,7 +9313,7 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/3com/3c59x.c linux-4.1.26/drive
}
#endif
-@@ -1916,12 +1916,12 @@
+@@ -1916,12 +1916,12 @@ static void vortex_tx_timeout(struct net_device *dev)
* Block interrupts because vortex_interrupt does a bare spin_lock()
*/
unsigned long flags;
@@ -8471,10 +9328,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/3com/3c59x.c linux-4.1.26/drive
}
}
-diff -Nur linux-4.1.26.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-4.1.26/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
---- linux-4.1.26.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2016-06-19 15:30:58.643295653 +0200
-@@ -2212,11 +2212,7 @@
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index 6e9036a06515..cc956b06ad18 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -2212,11 +2212,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
}
tpd_req = atl1c_cal_tpd_req(skb);
@@ -8487,10 +9345,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linu
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* no enough descriptor, just stop queue */
-diff -Nur linux-4.1.26.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-4.1.26/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
---- linux-4.1.26.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2016-06-19 15:30:58.643295653 +0200
-@@ -1880,8 +1880,7 @@
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index 59a03a193e83..734f7a7ad2c3 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1880,8 +1880,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
tpd_req = atl1e_cal_tdp_req(skb);
@@ -8500,10 +9359,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linu
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* no enough descriptor, just stop queue */
-diff -Nur linux-4.1.26.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-4.1.26/drivers/net/ethernet/chelsio/cxgb/sge.c
---- linux-4.1.26.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/ethernet/chelsio/cxgb/sge.c 2016-06-19 15:30:58.643295653 +0200
-@@ -1664,8 +1664,7 @@
+diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
+index 526ea74e82d9..86f467a2c485 100644
+--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
+@@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
@@ -8513,10 +9373,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-4.1.26
reclaim_completed_tx(sge, q);
-diff -Nur linux-4.1.26.orig/drivers/net/ethernet/freescale/gianfar.c linux-4.1.26/drivers/net/ethernet/freescale/gianfar.c
---- linux-4.1.26.orig/drivers/net/ethernet/freescale/gianfar.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/ethernet/freescale/gianfar.c 2016-06-19 15:30:58.643295653 +0200
-@@ -1540,7 +1540,7 @@
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 4ee080d49bc0..e616b71d5014 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -1540,7 +1540,7 @@ static int gfar_suspend(struct device *dev)
if (netif_running(ndev)) {
@@ -8525,7 +9386,7 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/freescale/gianfar.c linux-4.1.2
lock_tx_qs(priv);
gfar_halt_nodisable(priv);
-@@ -1556,7 +1556,7 @@
+@@ -1556,7 +1556,7 @@ static int gfar_suspend(struct device *dev)
gfar_write(&regs->maccfg1, tempval);
unlock_tx_qs(priv);
@@ -8534,7 +9395,7 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/freescale/gianfar.c linux-4.1.2
disable_napi(priv);
-@@ -1598,7 +1598,7 @@
+@@ -1598,7 +1598,7 @@ static int gfar_resume(struct device *dev)
/* Disable Magic Packet mode, in case something
* else woke us up.
*/
@@ -8543,7 +9404,7 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/freescale/gianfar.c linux-4.1.2
lock_tx_qs(priv);
tempval = gfar_read(&regs->maccfg2);
-@@ -1608,7 +1608,7 @@
+@@ -1608,7 +1608,7 @@ static int gfar_resume(struct device *dev)
gfar_start(priv);
unlock_tx_qs(priv);
@@ -8552,7 +9413,7 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/freescale/gianfar.c linux-4.1.2
netif_device_attach(ndev);
-@@ -3418,14 +3418,14 @@
+@@ -3418,14 +3418,14 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
dev->stats.tx_dropped++;
atomic64_inc(&priv->extra_stats.tx_underrun);
@@ -8569,10 +9430,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/freescale/gianfar.c linux-4.1.2
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
-diff -Nur linux-4.1.26.orig/drivers/net/ethernet/neterion/s2io.c linux-4.1.26/drivers/net/ethernet/neterion/s2io.c
---- linux-4.1.26.orig/drivers/net/ethernet/neterion/s2io.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/ethernet/neterion/s2io.c 2016-06-19 15:30:58.647295807 +0200
-@@ -4084,12 +4084,7 @@
+diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
+index 1e0f72b65459..bb5ced2b5194 100644
+--- a/drivers/net/ethernet/neterion/s2io.c
++++ b/drivers/net/ethernet/neterion/s2io.c
+@@ -4084,12 +4084,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
@@ -8586,10 +9448,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/neterion/s2io.c linux-4.1.26/dr
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
-diff -Nur linux-4.1.26.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-4.1.26/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
---- linux-4.1.26.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2016-06-19 15:30:58.647295807 +0200
-@@ -2137,10 +2137,8 @@
+diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+index 3b98b263bad0..ca4add749410 100644
+--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -2137,10 +2137,8 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
unsigned long flags;
@@ -8602,10 +9465,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
-diff -Nur linux-4.1.26.orig/drivers/net/ethernet/realtek/8139too.c linux-4.1.26/drivers/net/ethernet/realtek/8139too.c
---- linux-4.1.26.orig/drivers/net/ethernet/realtek/8139too.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/ethernet/realtek/8139too.c 2016-06-19 15:30:58.647295807 +0200
-@@ -2229,7 +2229,7 @@
+diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
+index 78bb4ceb1cdd..b5156963ca07 100644
+--- a/drivers/net/ethernet/realtek/8139too.c
++++ b/drivers/net/ethernet/realtek/8139too.c
+@@ -2229,7 +2229,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
@@ -8614,10 +9478,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/realtek/8139too.c linux-4.1.26/
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}
-diff -Nur linux-4.1.26.orig/drivers/net/ethernet/tehuti/tehuti.c linux-4.1.26/drivers/net/ethernet/tehuti/tehuti.c
---- linux-4.1.26.orig/drivers/net/ethernet/tehuti/tehuti.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/ethernet/tehuti/tehuti.c 2016-06-19 15:30:58.647295807 +0200
-@@ -1629,13 +1629,8 @@
+diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
+index a9cac8413e49..bd70b848174d 100644
+--- a/drivers/net/ethernet/tehuti/tehuti.c
++++ b/drivers/net/ethernet/tehuti/tehuti.c
+@@ -1629,13 +1629,8 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
unsigned long flags;
ENTER;
@@ -8633,10 +9498,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/ethernet/tehuti/tehuti.c linux-4.1.26/dr
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
-diff -Nur linux-4.1.26.orig/drivers/net/rionet.c linux-4.1.26/drivers/net/rionet.c
---- linux-4.1.26.orig/drivers/net/rionet.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/rionet.c 2016-06-19 15:30:58.647295807 +0200
-@@ -174,11 +174,7 @@
+diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
+index 18cc2c8d5447..a5e0ef3c02d1 100644
+--- a/drivers/net/rionet.c
++++ b/drivers/net/rionet.c
+@@ -174,11 +174,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned long flags;
int add_num = 1;
@@ -8649,10 +9515,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/rionet.c linux-4.1.26/drivers/net/rionet
if (is_multicast_ether_addr(eth->h_dest))
add_num = nets[rnet->mport->id].nact;
-diff -Nur linux-4.1.26.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-4.1.26/drivers/net/wireless/orinoco/orinoco_usb.c
---- linux-4.1.26.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/net/wireless/orinoco/orinoco_usb.c 2016-06-19 15:30:58.647295807 +0200
-@@ -697,7 +697,7 @@
+diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
+index 91f05442de28..8fb1c92724df 100644
+--- a/drivers/net/wireless/orinoco/orinoco_usb.c
++++ b/drivers/net/wireless/orinoco/orinoco_usb.c
+@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
while (!ctx->done.done && msecs--)
udelay(1000);
} else {
@@ -8661,10 +9528,11 @@ diff -Nur linux-4.1.26.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-4.1
ctx->done.done);
}
break;
-diff -Nur linux-4.1.26.orig/drivers/pci/access.c linux-4.1.26/drivers/pci/access.c
---- linux-4.1.26.orig/drivers/pci/access.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/pci/access.c 2016-06-19 15:30:58.647295807 +0200
-@@ -561,7 +561,7 @@
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index 502a82ca1db0..6bb46e0a3349 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -561,7 +561,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
WARN_ON(!dev->block_cfg_access);
dev->block_cfg_access = 0;
@@ -8673,10 +9541,11 @@ diff -Nur linux-4.1.26.orig/drivers/pci/access.c linux-4.1.26/drivers/pci/access
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
-diff -Nur linux-4.1.26.orig/drivers/scsi/fcoe/fcoe.c linux-4.1.26/drivers/scsi/fcoe/fcoe.c
---- linux-4.1.26.orig/drivers/scsi/fcoe/fcoe.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/scsi/fcoe/fcoe.c 2016-06-19 15:30:58.647295807 +0200
-@@ -1287,7 +1287,7 @@
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
+index ec193a8357d7..455bf9c67b16 100644
+--- a/drivers/scsi/fcoe/fcoe.c
++++ b/drivers/scsi/fcoe/fcoe.c
+@@ -1287,7 +1287,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
struct sk_buff *skb;
#ifdef CONFIG_SMP
struct fcoe_percpu_s *p0;
@@ -8685,7 +9554,7 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/fcoe/fcoe.c linux-4.1.26/drivers/scsi/f
#endif /* CONFIG_SMP */
FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
-@@ -1343,7 +1343,7 @@
+@@ -1343,7 +1343,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
kfree_skb(skb);
spin_unlock_bh(&p->fcoe_rx_list.lock);
}
@@ -8694,7 +9563,7 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/fcoe/fcoe.c linux-4.1.26/drivers/scsi/f
#else
/*
* This a non-SMP scenario where the singular Rx thread is
-@@ -1567,11 +1567,11 @@
+@@ -1567,11 +1567,11 @@ err2:
static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
@@ -8709,7 +9578,7 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/fcoe/fcoe.c linux-4.1.26/drivers/scsi/f
return rc;
}
-@@ -1767,11 +1767,11 @@
+@@ -1767,11 +1767,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
}
@@ -8723,7 +9592,16 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/fcoe/fcoe.c linux-4.1.26/drivers/scsi/f
return -EINVAL;
}
-@@ -1847,13 +1847,13 @@
+@@ -1815,7 +1815,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
+ */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+
+- stats = per_cpu_ptr(lport->stats, get_cpu());
++ stats = per_cpu_ptr(lport->stats, get_cpu_light());
+ if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+ if (stats->ErrorFrames < 5)
+ printk(KERN_WARNING "fcoe: FCoE version "
+@@ -1847,13 +1847,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
@@ -8739,10 +9617,11 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/fcoe/fcoe.c linux-4.1.26/drivers/scsi/f
kfree_skb(skb);
}
-diff -Nur linux-4.1.26.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-4.1.26/drivers/scsi/fcoe/fcoe_ctlr.c
---- linux-4.1.26.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/scsi/fcoe/fcoe_ctlr.c 2016-06-19 15:30:58.651295961 +0200
-@@ -831,7 +831,7 @@
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index 34a1b1f333b4..d91131210695 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -831,7 +831,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
INIT_LIST_HEAD(&del_list);
@@ -8751,7 +9630,7 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-4.1.26/drivers/s
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
-@@ -867,7 +867,7 @@
+@@ -867,7 +867,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
sel_time = fcf->time;
}
}
@@ -8760,10 +9639,11 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-4.1.26/drivers/s
list_for_each_entry_safe(fcf, next, &del_list, list) {
/* Removes fcf from current list */
-diff -Nur linux-4.1.26.orig/drivers/scsi/libfc/fc_exch.c linux-4.1.26/drivers/scsi/libfc/fc_exch.c
---- linux-4.1.26.orig/drivers/scsi/libfc/fc_exch.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/scsi/libfc/fc_exch.c 2016-06-19 15:30:58.651295961 +0200
-@@ -814,10 +814,10 @@
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 30f9ef0c0d4f..6c686bc01a82 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -814,10 +814,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
}
memset(ep, 0, sizeof(*ep));
@@ -8776,10 +9656,11 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/libfc/fc_exch.c linux-4.1.26/drivers/sc
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
-diff -Nur linux-4.1.26.orig/drivers/scsi/libsas/sas_ata.c linux-4.1.26/drivers/scsi/libsas/sas_ata.c
---- linux-4.1.26.orig/drivers/scsi/libsas/sas_ata.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/scsi/libsas/sas_ata.c 2016-06-19 15:30:58.651295961 +0200
-@@ -190,7 +190,7 @@
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 9c706d8c1441..d968ffc79c08 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -190,7 +190,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
/* TODO: audit callers to ensure they are ready for qc_issue to
* unconditionally re-enable interrupts
*/
@@ -8788,7 +9669,7 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/libsas/sas_ata.c linux-4.1.26/drivers/s
spin_unlock(ap->lock);
/* If the device fell off, no sense in issuing commands */
-@@ -255,7 +255,7 @@
+@@ -255,7 +255,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
out:
spin_lock(ap->lock);
@@ -8797,10 +9678,11 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/libsas/sas_ata.c linux-4.1.26/drivers/s
return ret;
}
-diff -Nur linux-4.1.26.orig/drivers/scsi/qla2xxx/qla_inline.h linux-4.1.26/drivers/scsi/qla2xxx/qla_inline.h
---- linux-4.1.26.orig/drivers/scsi/qla2xxx/qla_inline.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/scsi/qla2xxx/qla_inline.h 2016-06-19 15:30:58.651295961 +0200
-@@ -59,12 +59,12 @@
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index fee9eb7c8a60..b42d4adc42dc 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -59,12 +59,12 @@ qla2x00_poll(struct rsp_que *rsp)
{
unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
@@ -8815,9 +9697,10 @@ diff -Nur linux-4.1.26.orig/drivers/scsi/qla2xxx/qla_inline.h linux-4.1.26/drive
}
static inline uint8_t *
-diff -Nur linux-4.1.26.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.26/drivers/thermal/x86_pkg_temp_thermal.c
---- linux-4.1.26.orig/drivers/thermal/x86_pkg_temp_thermal.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/thermal/x86_pkg_temp_thermal.c 2016-06-19 15:30:58.651295961 +0200
+diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
+index 9ea3d9d49ffc..9e68706ae5e2 100644
+--- a/drivers/thermal/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -29,6 +29,7 @@
#include <linux/pm.h>
#include <linux/thermal.h>
@@ -8826,7 +9709,7 @@ diff -Nur linux-4.1.26.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.26/
#include <asm/cpu_device_id.h>
#include <asm/mce.h>
-@@ -352,7 +353,7 @@
+@@ -352,7 +353,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
}
}
@@ -8835,7 +9718,7 @@ diff -Nur linux-4.1.26.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.26/
{
unsigned long flags;
int cpu = smp_processor_id();
-@@ -369,7 +370,7 @@
+@@ -369,7 +370,7 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
pkg_work_scheduled[phy_id]) {
disable_pkg_thres_interrupt();
spin_unlock_irqrestore(&pkg_work_lock, flags);
@@ -8844,7 +9727,7 @@ diff -Nur linux-4.1.26.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.26/
}
pkg_work_scheduled[phy_id] = 1;
spin_unlock_irqrestore(&pkg_work_lock, flags);
-@@ -378,9 +379,48 @@
+@@ -378,9 +379,48 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
schedule_delayed_work_on(cpu,
&per_cpu(pkg_temp_thermal_threshold_work, cpu),
msecs_to_jiffies(notify_delay_ms));
@@ -8893,7 +9776,7 @@ diff -Nur linux-4.1.26.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.26/
static int find_siblings_cpu(int cpu)
{
int i;
-@@ -584,6 +624,9 @@
+@@ -584,6 +624,9 @@ static int __init pkg_temp_thermal_init(void)
if (!x86_match_cpu(pkg_temp_thermal_ids))
return -ENODEV;
@@ -8903,7 +9786,7 @@ diff -Nur linux-4.1.26.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.26/
spin_lock_init(&pkg_work_lock);
platform_thermal_package_notify =
pkg_temp_thermal_platform_thermal_notify;
-@@ -608,7 +651,7 @@
+@@ -608,7 +651,7 @@ err_ret:
kfree(pkg_work_scheduled);
platform_thermal_package_notify = NULL;
platform_thermal_package_rate_control = NULL;
@@ -8912,7 +9795,7 @@ diff -Nur linux-4.1.26.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.26/
return -ENODEV;
}
-@@ -633,6 +676,7 @@
+@@ -633,6 +676,7 @@ static void __exit pkg_temp_thermal_exit(void)
mutex_unlock(&phy_dev_list_mutex);
platform_thermal_package_notify = NULL;
platform_thermal_package_rate_control = NULL;
@@ -8920,9 +9803,10 @@ diff -Nur linux-4.1.26.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.26/
for_each_online_cpu(i)
cancel_delayed_work_sync(
&per_cpu(pkg_temp_thermal_threshold_work, i));
-diff -Nur linux-4.1.26.orig/drivers/tty/serial/8250/8250_core.c linux-4.1.26/drivers/tty/serial/8250/8250_core.c
---- linux-4.1.26.orig/drivers/tty/serial/8250/8250_core.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/tty/serial/8250/8250_core.c 2016-06-19 15:30:58.651295961 +0200
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index a64d53f7b1d1..fd96ce65bc31 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
@@ -36,6 +36,7 @@
#include <linux/nmi.h>
#include <linux/mutex.h>
@@ -8931,7 +9815,7 @@ diff -Nur linux-4.1.26.orig/drivers/tty/serial/8250/8250_core.c linux-4.1.26/dri
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_SPARC
-@@ -80,7 +81,16 @@
+@@ -80,7 +81,16 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
#define DEBUG_INTR(fmt...) do { } while (0)
#endif
@@ -8949,7 +9833,7 @@ diff -Nur linux-4.1.26.orig/drivers/tty/serial/8250/8250_core.c linux-4.1.26/dri
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-@@ -3372,7 +3382,7 @@
+@@ -3366,7 +3376,7 @@ static void serial8250_console_write(struct uart_8250_port *up, const char *s,
if (port->sysrq)
locked = 0;
@@ -8958,10 +9842,11 @@ diff -Nur linux-4.1.26.orig/drivers/tty/serial/8250/8250_core.c linux-4.1.26/dri
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
-diff -Nur linux-4.1.26.orig/drivers/tty/serial/amba-pl011.c linux-4.1.26/drivers/tty/serial/amba-pl011.c
---- linux-4.1.26.orig/drivers/tty/serial/amba-pl011.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/tty/serial/amba-pl011.c 2016-06-19 15:30:58.651295961 +0200
-@@ -2000,13 +2000,19 @@
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 0cc622afb67d..52f45f3029b7 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -2000,13 +2000,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
clk_enable(uap->clk);
@@ -8984,7 +9869,7 @@ diff -Nur linux-4.1.26.orig/drivers/tty/serial/amba-pl011.c linux-4.1.26/drivers
/*
* First save the CR then disable the interrupts
-@@ -2028,8 +2034,7 @@
+@@ -2028,8 +2034,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
writew(old_cr, uap->port.membase + UART011_CR);
if (locked)
@@ -8994,10 +9879,11 @@ diff -Nur linux-4.1.26.orig/drivers/tty/serial/amba-pl011.c linux-4.1.26/drivers
clk_disable(uap->clk);
}
-diff -Nur linux-4.1.26.orig/drivers/tty/serial/omap-serial.c linux-4.1.26/drivers/tty/serial/omap-serial.c
---- linux-4.1.26.orig/drivers/tty/serial/omap-serial.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/tty/serial/omap-serial.c 2016-06-19 15:30:58.651295961 +0200
-@@ -1282,13 +1282,10 @@
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 0a88693cd8ca..b89b06ed3b74 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -1282,13 +1282,10 @@ serial_omap_console_write(struct console *co, const char *s,
pm_runtime_get_sync(up->dev);
@@ -9014,7 +9900,7 @@ diff -Nur linux-4.1.26.orig/drivers/tty/serial/omap-serial.c linux-4.1.26/driver
/*
* First save the IER then disable the interrupts
-@@ -1317,8 +1314,7 @@
+@@ -1317,8 +1314,7 @@ serial_omap_console_write(struct console *co, const char *s,
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (locked)
@@ -9024,10 +9910,11 @@ diff -Nur linux-4.1.26.orig/drivers/tty/serial/omap-serial.c linux-4.1.26/driver
}
static int __init
-diff -Nur linux-4.1.26.orig/drivers/usb/core/hcd.c linux-4.1.26/drivers/usb/core/hcd.c
---- linux-4.1.26.orig/drivers/usb/core/hcd.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/usb/core/hcd.c 2016-06-19 15:30:58.655296115 +0200
-@@ -1684,9 +1684,9 @@
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 3a49ba2910df..c55c42603849 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1684,9 +1684,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
*/
@@ -9039,10 +9926,11 @@ diff -Nur linux-4.1.26.orig/drivers/usb/core/hcd.c linux-4.1.26/drivers/usb/core
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
-diff -Nur linux-4.1.26.orig/drivers/usb/gadget/function/f_fs.c linux-4.1.26/drivers/usb/gadget/function/f_fs.c
---- linux-4.1.26.orig/drivers/usb/gadget/function/f_fs.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/usb/gadget/function/f_fs.c 2016-06-19 15:30:58.655296115 +0200
-@@ -1404,7 +1404,7 @@
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index db9433eed2cc..6536d557abc1 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1404,7 +1404,7 @@ static void ffs_data_put(struct ffs_data *ffs)
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -9051,10 +9939,11 @@ diff -Nur linux-4.1.26.orig/drivers/usb/gadget/function/f_fs.c linux-4.1.26/driv
kfree(ffs->dev_name);
kfree(ffs);
}
-diff -Nur linux-4.1.26.orig/drivers/usb/gadget/legacy/inode.c linux-4.1.26/drivers/usb/gadget/legacy/inode.c
---- linux-4.1.26.orig/drivers/usb/gadget/legacy/inode.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/drivers/usb/gadget/legacy/inode.c 2016-06-19 15:30:58.655296115 +0200
-@@ -345,7 +345,7 @@
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index bccc5788bb98..8c23636963bc 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -345,7 +345,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
spin_unlock_irq (&epdata->dev->lock);
if (likely (value == 0)) {
@@ -9063,7 +9952,7 @@ diff -Nur linux-4.1.26.orig/drivers/usb/gadget/legacy/inode.c linux-4.1.26/drive
if (value != 0) {
spin_lock_irq (&epdata->dev->lock);
if (likely (epdata->ep != NULL)) {
-@@ -354,7 +354,7 @@
+@@ -354,7 +354,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
usb_ep_dequeue (epdata->ep, epdata->req);
spin_unlock_irq (&epdata->dev->lock);
@@ -9072,9 +9961,73 @@ diff -Nur linux-4.1.26.orig/drivers/usb/gadget/legacy/inode.c linux-4.1.26/drive
if (epdata->status == -ECONNRESET)
epdata->status = -EINTR;
} else {
-diff -Nur linux-4.1.26.orig/fs/aio.c linux-4.1.26/fs/aio.c
---- linux-4.1.26.orig/fs/aio.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/aio.c 2016-06-19 15:30:58.655296115 +0200
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
+index d6ca3697d3c8..ee6bc64f9656 100644
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
+@@ -17,7 +17,9 @@
+ #include <linux/device.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/list.h>
++#include <linux/mfd/syscon.h>
+ #include <linux/platform_device.h>
++#include <linux/regmap.h>
+ #include <linux/usb/ch9.h>
+ #include <linux/usb/gadget.h>
+ #include <linux/usb/atmel_usba_udc.h>
+@@ -1889,20 +1891,15 @@ static int atmel_usba_stop(struct usb_gadget *gadget)
+ #ifdef CONFIG_OF
+ static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on)
+ {
+- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
+-
+- if (is_on)
+- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
+- else
+- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
++ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
++ is_on ? AT91_PMC_BIASEN : 0);
+ }
+
+ static void at91sam9g45_pulse_bias(struct usba_udc *udc)
+ {
+- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
+-
+- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
+- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
++ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0);
++ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
++ AT91_PMC_BIASEN);
+ }
+
+ static const struct usba_udc_errata at91sam9rl_errata = {
+@@ -1939,6 +1936,9 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
+ return ERR_PTR(-EINVAL);
+
+ udc->errata = match->data;
++ udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
++ if (udc->errata && IS_ERR(udc->pmc))
++ return ERR_CAST(udc->pmc);
+
+ udc->num_ep = 0;
+
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
+index ea448a344767..3e1c9d589dfa 100644
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
+@@ -354,6 +354,8 @@ struct usba_udc {
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_regs;
+ #endif
++
++ struct regmap *pmc;
+ };
+
+ static inline struct usba_ep *to_usba_ep(struct usb_ep *ep)
+diff --git a/fs/aio.c b/fs/aio.c
+index 480440f4701f..5a2380de4a9b 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
@@ -40,6 +40,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
@@ -9083,7 +10036,7 @@ diff -Nur linux-4.1.26.orig/fs/aio.c linux-4.1.26/fs/aio.c
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
-@@ -115,7 +116,7 @@
+@@ -115,7 +116,7 @@ struct kioctx {
struct page **ring_pages;
long nr_pages;
@@ -9092,7 +10045,7 @@ diff -Nur linux-4.1.26.orig/fs/aio.c linux-4.1.26/fs/aio.c
/*
* signals when all in-flight requests are done
-@@ -253,6 +254,7 @@
+@@ -253,6 +254,7 @@ static int __init aio_setup(void)
.mount = aio_mount,
.kill_sb = kill_anon_super,
};
@@ -9100,7 +10053,7 @@ diff -Nur linux-4.1.26.orig/fs/aio.c linux-4.1.26/fs/aio.c
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
-@@ -559,9 +561,9 @@
+@@ -559,9 +561,9 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
return cancel(&kiocb->common);
}
@@ -9112,7 +10065,7 @@ diff -Nur linux-4.1.26.orig/fs/aio.c linux-4.1.26/fs/aio.c
pr_debug("freeing %p\n", ctx);
-@@ -580,8 +582,8 @@
+@@ -580,8 +582,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
complete(&ctx->rq_wait->comp);
@@ -9123,7 +10076,7 @@ diff -Nur linux-4.1.26.orig/fs/aio.c linux-4.1.26/fs/aio.c
}
/*
-@@ -589,9 +591,9 @@
+@@ -589,9 +591,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
@@ -9135,7 +10088,7 @@ diff -Nur linux-4.1.26.orig/fs/aio.c linux-4.1.26/fs/aio.c
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
-@@ -610,6 +612,14 @@
+@@ -610,6 +612,14 @@ static void free_ioctx_users(struct percpu_ref *ref)
percpu_ref_put(&ctx->reqs);
}
@@ -9150,9 +10103,10 @@ diff -Nur linux-4.1.26.orig/fs/aio.c linux-4.1.26/fs/aio.c
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
unsigned i, new_nr;
-diff -Nur linux-4.1.26.orig/fs/autofs4/autofs_i.h linux-4.1.26/fs/autofs4/autofs_i.h
---- linux-4.1.26.orig/fs/autofs4/autofs_i.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/autofs4/autofs_i.h 2016-06-19 15:30:58.655296115 +0200
+diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
+index 6196b5eaf9a5..ab37f57136f3 100644
+--- a/fs/autofs4/autofs_i.h
++++ b/fs/autofs4/autofs_i.h
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
@@ -9161,10 +10115,11 @@ diff -Nur linux-4.1.26.orig/fs/autofs4/autofs_i.h linux-4.1.26/fs/autofs4/autofs
#include <asm/current.h>
#include <asm/uaccess.h>
-diff -Nur linux-4.1.26.orig/fs/autofs4/expire.c linux-4.1.26/fs/autofs4/expire.c
---- linux-4.1.26.orig/fs/autofs4/expire.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/autofs4/expire.c 2016-06-19 15:30:58.655296115 +0200
-@@ -150,7 +150,7 @@
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index 7a5a598a2d94..d08bcdc30566 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -150,7 +150,7 @@ again:
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
@@ -9173,10 +10128,11 @@ diff -Nur linux-4.1.26.orig/fs/autofs4/expire.c linux-4.1.26/fs/autofs4/expire.c
goto relock;
}
spin_unlock(&p->d_lock);
-diff -Nur linux-4.1.26.orig/fs/buffer.c linux-4.1.26/fs/buffer.c
---- linux-4.1.26.orig/fs/buffer.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/buffer.c 2016-06-19 15:30:58.655296115 +0200
-@@ -301,8 +301,7 @@
+diff --git a/fs/buffer.c b/fs/buffer.c
+index c7a5602d01ee..2907544c3a1d 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -301,8 +301,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
* decide that the page is now completely done.
*/
first = page_buffers(page);
@@ -9186,7 +10142,7 @@ diff -Nur linux-4.1.26.orig/fs/buffer.c linux-4.1.26/fs/buffer.c
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -315,8 +314,7 @@
+@@ -315,8 +314,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -9196,7 +10152,7 @@ diff -Nur linux-4.1.26.orig/fs/buffer.c linux-4.1.26/fs/buffer.c
/*
* If none of the buffers had errors and they are all
-@@ -328,9 +326,7 @@
+@@ -328,9 +326,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
return;
still_busy:
@@ -9207,7 +10163,7 @@ diff -Nur linux-4.1.26.orig/fs/buffer.c linux-4.1.26/fs/buffer.c
}
/*
-@@ -358,8 +354,7 @@
+@@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
first = page_buffers(page);
@@ -9217,7 +10173,7 @@ diff -Nur linux-4.1.26.orig/fs/buffer.c linux-4.1.26/fs/buffer.c
clear_buffer_async_write(bh);
unlock_buffer(bh);
-@@ -371,15 +366,12 @@
+@@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
}
@@ -9235,7 +10191,7 @@ diff -Nur linux-4.1.26.orig/fs/buffer.c linux-4.1.26/fs/buffer.c
}
EXPORT_SYMBOL(end_buffer_async_write);
-@@ -3325,6 +3317,7 @@
+@@ -3325,6 +3317,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -9243,9 +10199,10 @@ diff -Nur linux-4.1.26.orig/fs/buffer.c linux-4.1.26/fs/buffer.c
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
-diff -Nur linux-4.1.26.orig/fs/dcache.c linux-4.1.26/fs/dcache.c
---- linux-4.1.26.orig/fs/dcache.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/dcache.c 2016-06-19 15:30:58.655296115 +0200
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 660857431b1c..c790b2b070ab 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
@@ -9254,16 +10211,38 @@ diff -Nur linux-4.1.26.orig/fs/dcache.c linux-4.1.26/fs/dcache.c
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
-@@ -578,7 +579,7 @@
+@@ -747,6 +748,8 @@ static inline bool fast_dput(struct dentry *dentry)
+ */
+ void dput(struct dentry *dentry)
+ {
++ struct dentry *parent;
++
+ if (unlikely(!dentry))
+ return;
- failed:
- spin_unlock(&dentry->d_lock);
-- cpu_relax();
-+ cpu_chill();
- return dentry; /* try again with same dentry */
- }
+@@ -783,9 +786,18 @@ repeat:
+ return;
-@@ -2388,7 +2389,7 @@
+ kill_it:
+- dentry = dentry_kill(dentry);
+- if (dentry) {
+- cond_resched();
++ parent = dentry_kill(dentry);
++ if (parent) {
++ int r;
++
++ if (parent == dentry) {
++ /* the task with the highest priority won't schedule */
++ r = cond_resched();
++ if (!r)
++ cpu_chill();
++ } else {
++ dentry = parent;
++ }
+ goto repeat;
+ }
+ }
+@@ -2391,7 +2403,7 @@ again:
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
@@ -9272,10 +10251,11 @@ diff -Nur linux-4.1.26.orig/fs/dcache.c linux-4.1.26/fs/dcache.c
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
-diff -Nur linux-4.1.26.orig/fs/eventpoll.c linux-4.1.26/fs/eventpoll.c
---- linux-4.1.26.orig/fs/eventpoll.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/eventpoll.c 2016-06-19 15:30:58.655296115 +0200
-@@ -505,12 +505,12 @@
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 1e009cad8d5c..d0c12504d3b4 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -505,12 +505,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
@@ -9290,10 +10270,11 @@ diff -Nur linux-4.1.26.orig/fs/eventpoll.c linux-4.1.26/fs/eventpoll.c
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
-diff -Nur linux-4.1.26.orig/fs/exec.c linux-4.1.26/fs/exec.c
---- linux-4.1.26.orig/fs/exec.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/exec.c 2016-06-19 15:30:58.659296270 +0200
-@@ -859,12 +859,14 @@
+diff --git a/fs/exec.c b/fs/exec.c
+index 1977c2a553ac..0e7125be0283 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -859,12 +859,14 @@ static int exec_mmap(struct mm_struct *mm)
}
}
task_lock(tsk);
@@ -9308,9 +10289,10 @@ diff -Nur linux-4.1.26.orig/fs/exec.c linux-4.1.26/fs/exec.c
task_unlock(tsk);
if (old_mm) {
up_read(&old_mm->mmap_sem);
-diff -Nur linux-4.1.26.orig/fs/f2fs/f2fs.h linux-4.1.26/fs/f2fs/f2fs.h
---- linux-4.1.26.orig/fs/f2fs/f2fs.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/f2fs/f2fs.h 2016-06-19 15:30:58.659296270 +0200
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 8de34ab6d5b1..4e80270703a4 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
@@ -22,7 +22,6 @@
#ifdef CONFIG_F2FS_CHECK_FS
@@ -9327,7 +10309,7 @@ diff -Nur linux-4.1.26.orig/fs/f2fs/f2fs.h linux-4.1.26/fs/f2fs/f2fs.h
#endif
/*
-@@ -838,7 +836,7 @@
+@@ -838,7 +836,7 @@ static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
@@ -9336,10 +10318,11 @@ diff -Nur linux-4.1.26.orig/fs/f2fs/f2fs.h linux-4.1.26/fs/f2fs/f2fs.h
}
static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
-diff -Nur linux-4.1.26.orig/fs/jbd/checkpoint.c linux-4.1.26/fs/jbd/checkpoint.c
---- linux-4.1.26.orig/fs/jbd/checkpoint.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/jbd/checkpoint.c 2016-06-19 15:30:58.659296270 +0200
-@@ -129,6 +129,8 @@
+diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
+index 08c03044abdd..95debd71e5fa 100644
+--- a/fs/jbd/checkpoint.c
++++ b/fs/jbd/checkpoint.c
+@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *journal)
if (journal->j_flags & JFS_ABORT)
return;
spin_unlock(&journal->j_state_lock);
@@ -9348,10 +10331,11 @@ diff -Nur linux-4.1.26.orig/fs/jbd/checkpoint.c linux-4.1.26/fs/jbd/checkpoint.c
mutex_lock(&journal->j_checkpoint_mutex);
/*
-diff -Nur linux-4.1.26.orig/fs/jbd2/checkpoint.c linux-4.1.26/fs/jbd2/checkpoint.c
---- linux-4.1.26.orig/fs/jbd2/checkpoint.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/jbd2/checkpoint.c 2016-06-19 15:30:58.659296270 +0200
-@@ -116,6 +116,8 @@
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 8c44654ce274..78c1545a3fab 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -116,6 +116,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
nblocks = jbd2_space_needed(journal);
while (jbd2_log_space_left(journal) < nblocks) {
write_unlock(&journal->j_state_lock);
@@ -9360,9 +10344,10 @@ diff -Nur linux-4.1.26.orig/fs/jbd2/checkpoint.c linux-4.1.26/fs/jbd2/checkpoint
mutex_lock(&journal->j_checkpoint_mutex);
/*
-diff -Nur linux-4.1.26.orig/fs/namespace.c linux-4.1.26/fs/namespace.c
---- linux-4.1.26.orig/fs/namespace.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/namespace.c 2016-06-19 15:30:58.659296270 +0200
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 556721fb0cf6..d27cd4633f59 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
@@ -14,6 +14,7 @@
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
@@ -9371,7 +10356,7 @@ diff -Nur linux-4.1.26.orig/fs/namespace.c linux-4.1.26/fs/namespace.c
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/init.h> /* init_rootfs */
-@@ -353,8 +354,11 @@
+@@ -353,8 +354,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
@@ -9385,10 +10370,11 @@ diff -Nur linux-4.1.26.orig/fs/namespace.c linux-4.1.26/fs/namespace.c
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
-diff -Nur linux-4.1.26.orig/fs/ntfs/aops.c linux-4.1.26/fs/ntfs/aops.c
---- linux-4.1.26.orig/fs/ntfs/aops.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/ntfs/aops.c 2016-06-19 15:30:58.659296270 +0200
-@@ -107,8 +107,7 @@
+diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
+index 7521e11db728..f0de4b6b8bf3 100644
+--- a/fs/ntfs/aops.c
++++ b/fs/ntfs/aops.c
+@@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
@@ -9398,7 +10384,7 @@ diff -Nur linux-4.1.26.orig/fs/ntfs/aops.c linux-4.1.26/fs/ntfs/aops.c
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -123,8 +122,7 @@
+@@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -9408,7 +10394,7 @@ diff -Nur linux-4.1.26.orig/fs/ntfs/aops.c linux-4.1.26/fs/ntfs/aops.c
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
-@@ -145,13 +143,13 @@
+@@ -145,13 +143,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
recs = PAGE_CACHE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
@@ -9424,7 +10410,7 @@ diff -Nur linux-4.1.26.orig/fs/ntfs/aops.c linux-4.1.26/fs/ntfs/aops.c
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
-@@ -159,9 +157,7 @@
+@@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
unlock_page(page);
return;
still_busy:
@@ -9435,10 +10421,11 @@ diff -Nur linux-4.1.26.orig/fs/ntfs/aops.c linux-4.1.26/fs/ntfs/aops.c
}
/**
-diff -Nur linux-4.1.26.orig/fs/timerfd.c linux-4.1.26/fs/timerfd.c
---- linux-4.1.26.orig/fs/timerfd.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/timerfd.c 2016-06-19 15:30:58.659296270 +0200
-@@ -450,7 +450,10 @@
+diff --git a/fs/timerfd.c b/fs/timerfd.c
+index b94fa6c3c6eb..64fb86066237 100644
+--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -450,7 +450,10 @@ static int do_timerfd_settime(int ufd, int flags,
break;
}
spin_unlock_irq(&ctx->wqh.lock);
@@ -9450,10 +10437,11 @@ diff -Nur linux-4.1.26.orig/fs/timerfd.c linux-4.1.26/fs/timerfd.c
}
/*
-diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.c linux-4.1.26/fs/xfs/xfs_inode.c
---- linux-4.1.26.orig/fs/xfs/xfs_inode.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/xfs/xfs_inode.c 2016-06-19 15:30:58.659296270 +0200
-@@ -164,7 +164,7 @@
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index c29f34253e2b..f30d5b8cd5a1 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -164,7 +164,7 @@ xfs_ilock(
(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
@@ -9462,7 +10450,7 @@ diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.c linux-4.1.26/fs/xfs/xfs_inode.c
if (lock_flags & XFS_IOLOCK_EXCL)
mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-@@ -212,7 +212,7 @@
+@@ -212,7 +212,7 @@ xfs_ilock_nowait(
(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
@@ -9471,7 +10459,7 @@ diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.c linux-4.1.26/fs/xfs/xfs_inode.c
if (lock_flags & XFS_IOLOCK_EXCL) {
if (!mrtryupdate(&ip->i_iolock))
-@@ -281,7 +281,7 @@
+@@ -281,7 +281,7 @@ xfs_iunlock(
(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
@@ -9480,7 +10468,7 @@ diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.c linux-4.1.26/fs/xfs/xfs_inode.c
ASSERT(lock_flags != 0);
if (lock_flags & XFS_IOLOCK_EXCL)
-@@ -364,30 +364,38 @@
+@@ -364,30 +364,38 @@ int xfs_lock_delays;
/*
* Bump the subclass so xfs_lock_inodes() acquires each lock with a different
@@ -9532,7 +10520,7 @@ diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.c linux-4.1.26/fs/xfs/xfs_inode.c
}
/*
-@@ -399,6 +407,11 @@
+@@ -399,6 +407,11 @@ xfs_lock_inumorder(int lock_mode, int subclass)
* transaction (such as truncate). This can result in deadlock since the long
* running trans might need to wait for the inode we just locked in order to
* push the tail and free space in the log.
@@ -9544,7 +10532,7 @@ diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.c linux-4.1.26/fs/xfs/xfs_inode.c
*/
void
xfs_lock_inodes(
-@@ -409,8 +422,29 @@
+@@ -409,8 +422,29 @@ xfs_lock_inodes(
int attempts = 0, i, j, try_lock;
xfs_log_item_t *lp;
@@ -9575,10 +10563,11 @@ diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.c linux-4.1.26/fs/xfs/xfs_inode.c
try_lock = 0;
i = 0;
-diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.h linux-4.1.26/fs/xfs/xfs_inode.h
---- linux-4.1.26.orig/fs/xfs/xfs_inode.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/fs/xfs/xfs_inode.h 2016-06-19 15:30:58.659296270 +0200
-@@ -284,9 +284,9 @@
+diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
+index 8f22d20368d8..ee26a603c131 100644
+--- a/fs/xfs/xfs_inode.h
++++ b/fs/xfs/xfs_inode.h
+@@ -284,9 +284,9 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
* Flags for lockdep annotations.
*
* XFS_LOCK_PARENT - for directory operations that require locking a
@@ -9591,7 +10580,7 @@ diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.h linux-4.1.26/fs/xfs/xfs_inode.h
*
* XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
* inodes do not participate in the normal lock order, and thus have their
-@@ -295,30 +295,63 @@
+@@ -295,30 +295,63 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
* XFS_LOCK_INUMORDER - for locking several inodes at the some time
* with xfs_lock_inodes(). This flag is used as the starting subclass
* and each subsequent lock acquired will increment the subclass by one.
@@ -9641,22 +10630,29 @@ diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.h linux-4.1.26/fs/xfs/xfs_inode.h
-
-#define XFS_IOLOCK_SHIFT 16
-#define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
+-
+-#define XFS_MMAPLOCK_SHIFT 20
+-
+-#define XFS_ILOCK_SHIFT 24
+-#define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
+-#define XFS_ILOCK_RTBITMAP (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
+-#define XFS_ILOCK_RTSUM (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
+-
+-#define XFS_IOLOCK_DEP_MASK 0x000f0000
+-#define XFS_MMAPLOCK_DEP_MASK 0x00f00000
+-#define XFS_ILOCK_DEP_MASK 0xff000000
+-#define XFS_LOCK_DEP_MASK (XFS_IOLOCK_DEP_MASK | \
+#define XFS_IOLOCK_SHIFT 16
+#define XFS_IOLOCK_PARENT_VAL 4
+#define XFS_IOLOCK_MAX_SUBCLASS (XFS_IOLOCK_PARENT_VAL - 1)
+#define XFS_IOLOCK_DEP_MASK 0x000f0000
+#define XFS_IOLOCK_PARENT (XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)
-
--#define XFS_MMAPLOCK_SHIFT 20
++
+#define XFS_MMAPLOCK_SHIFT 20
+#define XFS_MMAPLOCK_NUMORDER 0
+#define XFS_MMAPLOCK_MAX_SUBCLASS 3
+#define XFS_MMAPLOCK_DEP_MASK 0x00f00000
-
--#define XFS_ILOCK_SHIFT 24
--#define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
--#define XFS_ILOCK_RTBITMAP (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
--#define XFS_ILOCK_RTSUM (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
++
+#define XFS_ILOCK_SHIFT 24
+#define XFS_ILOCK_PARENT_VAL 5
+#define XFS_ILOCK_MAX_SUBCLASS (XFS_ILOCK_PARENT_VAL - 1)
@@ -9666,18 +10662,15 @@ diff -Nur linux-4.1.26.orig/fs/xfs/xfs_inode.h linux-4.1.26/fs/xfs/xfs_inode.h
+#define XFS_ILOCK_PARENT (XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)
+#define XFS_ILOCK_RTBITMAP (XFS_ILOCK_RTBITMAP_VAL << XFS_ILOCK_SHIFT)
+#define XFS_ILOCK_RTSUM (XFS_ILOCK_RTSUM_VAL << XFS_ILOCK_SHIFT)
-
--#define XFS_IOLOCK_DEP_MASK 0x000f0000
--#define XFS_MMAPLOCK_DEP_MASK 0x00f00000
--#define XFS_ILOCK_DEP_MASK 0xff000000
--#define XFS_LOCK_DEP_MASK (XFS_IOLOCK_DEP_MASK | \
++
+#define XFS_LOCK_SUBCLASS_MASK (XFS_IOLOCK_DEP_MASK | \
XFS_MMAPLOCK_DEP_MASK | \
XFS_ILOCK_DEP_MASK)
-diff -Nur linux-4.1.26.orig/include/acpi/platform/aclinux.h linux-4.1.26/include/acpi/platform/aclinux.h
---- linux-4.1.26.orig/include/acpi/platform/aclinux.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/acpi/platform/aclinux.h 2016-06-19 15:30:58.659296270 +0200
+diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
+index 74ba46c8157a..ccde2a9ca7b7 100644
+--- a/include/acpi/platform/aclinux.h
++++ b/include/acpi/platform/aclinux.h
@@ -123,6 +123,7 @@
#define acpi_cache_t struct kmem_cache
@@ -9707,10 +10700,11 @@ diff -Nur linux-4.1.26.orig/include/acpi/platform/aclinux.h linux-4.1.26/include
/*
* OSL interfaces used by debugger/disassembler
*/
-diff -Nur linux-4.1.26.orig/include/asm-generic/bug.h linux-4.1.26/include/asm-generic/bug.h
---- linux-4.1.26.orig/include/asm-generic/bug.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/asm-generic/bug.h 2016-06-19 15:30:58.663296424 +0200
-@@ -206,6 +206,20 @@
+diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
+index 630dd2372238..850e4d993a88 100644
+--- a/include/asm-generic/bug.h
++++ b/include/asm-generic/bug.h
+@@ -206,6 +206,20 @@ extern void warn_slowpath_null(const char *file, const int line);
# define WARN_ON_SMP(x) ({0;})
#endif
@@ -9731,9 +10725,10 @@ diff -Nur linux-4.1.26.orig/include/asm-generic/bug.h linux-4.1.26/include/asm-g
#endif /* __ASSEMBLY__ */
#endif
-diff -Nur linux-4.1.26.orig/include/asm-generic/futex.h linux-4.1.26/include/asm-generic/futex.h
---- linux-4.1.26.orig/include/asm-generic/futex.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/asm-generic/futex.h 2016-06-19 15:30:58.663296424 +0200
+diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
+index b59b5a52637e..e56272c919b5 100644
+--- a/include/asm-generic/futex.h
++++ b/include/asm-generic/futex.h
@@ -8,8 +8,7 @@
#ifndef CONFIG_SMP
/*
@@ -9744,7 +10739,7 @@ diff -Nur linux-4.1.26.orig/include/asm-generic/futex.h linux-4.1.26/include/asm
*
*/
-@@ -38,6 +37,7 @@
+@@ -38,6 +37,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
@@ -9752,7 +10747,7 @@ diff -Nur linux-4.1.26.orig/include/asm-generic/futex.h linux-4.1.26/include/asm
pagefault_disable();
ret = -EFAULT;
-@@ -72,6 +72,7 @@
+@@ -72,6 +72,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
out_pagefault_enable:
pagefault_enable();
@@ -9760,7 +10755,7 @@ diff -Nur linux-4.1.26.orig/include/asm-generic/futex.h linux-4.1.26/include/asm
if (ret == 0) {
switch (cmp) {
-@@ -106,6 +107,7 @@
+@@ -106,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
{
u32 val;
@@ -9768,7 +10763,7 @@ diff -Nur linux-4.1.26.orig/include/asm-generic/futex.h linux-4.1.26/include/asm
if (unlikely(get_user(val, uaddr) != 0))
return -EFAULT;
-@@ -113,6 +115,7 @@
+@@ -113,6 +115,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
*uval = val;
@@ -9776,10 +10771,40 @@ diff -Nur linux-4.1.26.orig/include/asm-generic/futex.h linux-4.1.26/include/asm
return 0;
}
-diff -Nur linux-4.1.26.orig/include/linux/blkdev.h linux-4.1.26/include/linux/blkdev.h
---- linux-4.1.26.orig/include/linux/blkdev.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/blkdev.h 2016-06-19 15:30:58.663296424 +0200
-@@ -101,6 +101,7 @@
+diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
+index b6a53e8e526a..c91d3d764c36 100644
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -7,10 +7,10 @@
+
+ static __always_inline int preempt_count(void)
+ {
+- return current_thread_info()->preempt_count;
++ return READ_ONCE(current_thread_info()->preempt_count);
+ }
+
+-static __always_inline int *preempt_count_ptr(void)
++static __always_inline volatile int *preempt_count_ptr(void)
+ {
+ return &current_thread_info()->preempt_count;
+ }
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index 2056a99b92f8..e6ff990c9f11 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -202,6 +202,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
+
+ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
+ struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
++void __blk_mq_complete_request_remote_work(struct work_struct *work);
+
+ int blk_mq_request_started(struct request *rq);
+ void blk_mq_start_request(struct request *rq);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 5d93a6645e88..37faf63af7f7 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -101,6 +101,7 @@ struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
@@ -9787,7 +10812,7 @@ diff -Nur linux-4.1.26.orig/include/linux/blkdev.h linux-4.1.26/include/linux/bl
unsigned long fifo_time;
};
-@@ -482,7 +483,7 @@
+@@ -482,7 +483,7 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
@@ -9796,20 +10821,10 @@ diff -Nur linux-4.1.26.orig/include/linux/blkdev.h linux-4.1.26/include/linux/bl
struct percpu_ref mq_usage_counter;
struct list_head all_q_node;
-diff -Nur linux-4.1.26.orig/include/linux/blk-mq.h linux-4.1.26/include/linux/blk-mq.h
---- linux-4.1.26.orig/include/linux/blk-mq.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/blk-mq.h 2016-06-19 15:30:58.663296424 +0200
-@@ -202,6 +202,7 @@
-
- struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
- struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
-+void __blk_mq_complete_request_remote_work(struct work_struct *work);
-
- int blk_mq_request_started(struct request *rq);
- void blk_mq_start_request(struct request *rq);
-diff -Nur linux-4.1.26.orig/include/linux/bottom_half.h linux-4.1.26/include/linux/bottom_half.h
---- linux-4.1.26.orig/include/linux/bottom_half.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/bottom_half.h 2016-06-19 15:30:58.663296424 +0200
+diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
+index 86c12c93e3cf..d3c0c02acc97 100644
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
@@ -4,6 +4,39 @@
#include <linux/preempt.h>
#include <linux/preempt_mask.h>
@@ -9850,17 +10865,18 @@ diff -Nur linux-4.1.26.orig/include/linux/bottom_half.h linux-4.1.26/include/lin
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
-@@ -31,5 +64,6 @@
+@@ -31,5 +64,6 @@ static inline void local_bh_enable(void)
{
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
+#endif
#endif /* _LINUX_BH_H */
-diff -Nur linux-4.1.26.orig/include/linux/buffer_head.h linux-4.1.26/include/linux/buffer_head.h
---- linux-4.1.26.orig/include/linux/buffer_head.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/buffer_head.h 2016-06-19 15:30:58.663296424 +0200
-@@ -75,8 +75,52 @@
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index e6797ded700e..6d25afd8b847 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -75,8 +75,52 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
@@ -9913,10 +10929,11 @@ diff -Nur linux-4.1.26.orig/include/linux/buffer_head.h linux-4.1.26/include/lin
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
-diff -Nur linux-4.1.26.orig/include/linux/cgroup-defs.h linux-4.1.26/include/linux/cgroup-defs.h
---- linux-4.1.26.orig/include/linux/cgroup-defs.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/cgroup-defs.h 2016-06-19 15:30:58.663296424 +0200
-@@ -124,6 +124,7 @@
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 8d9c7e7a6432..3fd10743e452 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -124,6 +124,7 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
@@ -9924,9 +10941,10 @@ diff -Nur linux-4.1.26.orig/include/linux/cgroup-defs.h linux-4.1.26/include/lin
};
/*
-diff -Nur linux-4.1.26.orig/include/linux/cgroup.h linux-4.1.26/include/linux/cgroup.h
---- linux-4.1.26.orig/include/linux/cgroup.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/cgroup.h 2016-06-19 15:30:58.663296424 +0200
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index 96a2ecd5aa69..7f08633d839a 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
@@ -17,6 +17,8 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
@@ -9936,9 +10954,10 @@ diff -Nur linux-4.1.26.orig/include/linux/cgroup.h linux-4.1.26/include/linux/cg
#include <linux/cgroup-defs.h>
-diff -Nur linux-4.1.26.orig/include/linux/completion.h linux-4.1.26/include/linux/completion.h
---- linux-4.1.26.orig/include/linux/completion.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/completion.h 2016-06-19 15:30:58.663296424 +0200
+diff --git a/include/linux/completion.h b/include/linux/completion.h
+index 5d5aaae3af43..3fe8d14c98c0 100644
+--- a/include/linux/completion.h
++++ b/include/linux/completion.h
@@ -7,8 +7,7 @@
* Atomic wait-for-completion handler data structures.
* See kernel/sched/completion.c for details.
@@ -9963,7 +10982,7 @@ diff -Nur linux-4.1.26.orig/include/linux/completion.h linux-4.1.26/include/linu
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
-@@ -73,7 +72,7 @@
+@@ -73,7 +72,7 @@ struct completion {
static inline void init_completion(struct completion *x)
{
x->done = 0;
@@ -9972,10 +10991,11 @@ diff -Nur linux-4.1.26.orig/include/linux/completion.h linux-4.1.26/include/linu
}
/**
-diff -Nur linux-4.1.26.orig/include/linux/cpu.h linux-4.1.26/include/linux/cpu.h
---- linux-4.1.26.orig/include/linux/cpu.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/cpu.h 2016-06-19 15:30:58.663296424 +0200
-@@ -231,6 +231,8 @@
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index c0fb6b1b4712..2a22c7c729bc 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -231,6 +231,8 @@ extern bool try_get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -9984,7 +11004,7 @@ diff -Nur linux-4.1.26.orig/include/linux/cpu.h linux-4.1.26/include/linux/cpu.h
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
-@@ -249,6 +251,8 @@
+@@ -249,6 +251,8 @@ static inline void cpu_hotplug_done(void) {}
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
@@ -9993,10 +11013,11 @@ diff -Nur linux-4.1.26.orig/include/linux/cpu.h linux-4.1.26/include/linux/cpu.h
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
-diff -Nur linux-4.1.26.orig/include/linux/delay.h linux-4.1.26/include/linux/delay.h
---- linux-4.1.26.orig/include/linux/delay.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/delay.h 2016-06-19 15:30:58.663296424 +0200
-@@ -52,4 +52,10 @@
+diff --git a/include/linux/delay.h b/include/linux/delay.h
+index a6ecb34cf547..37caab306336 100644
+--- a/include/linux/delay.h
++++ b/include/linux/delay.h
+@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int seconds)
msleep(seconds * 1000);
}
@@ -10007,23 +11028,11 @@ diff -Nur linux-4.1.26.orig/include/linux/delay.h linux-4.1.26/include/linux/del
+#endif
+
#endif /* defined(_LINUX_DELAY_H) */
-diff -Nur linux-4.1.26.orig/include/linux/ftrace_event.h linux-4.1.26/include/linux/ftrace_event.h
---- linux-4.1.26.orig/include/linux/ftrace_event.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/ftrace_event.h 2016-06-19 15:30:58.663296424 +0200
-@@ -66,6 +66,9 @@
- unsigned char flags;
- unsigned char preempt_count;
- int pid;
-+ unsigned short migrate_disable;
-+ unsigned short padding;
-+ unsigned char preempt_lazy_count;
- };
-
- #define FTRACE_MAX_EVENT \
-diff -Nur linux-4.1.26.orig/include/linux/ftrace.h linux-4.1.26/include/linux/ftrace.h
---- linux-4.1.26.orig/include/linux/ftrace.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/ftrace.h 2016-06-19 15:30:58.663296424 +0200
-@@ -682,6 +682,18 @@
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 6cd8c0ee4b6f..1ec37fef6355 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -682,6 +682,18 @@ static inline void __ftrace_enabled_restore(int enabled)
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
@@ -10042,9 +11051,24 @@ diff -Nur linux-4.1.26.orig/include/linux/ftrace.h linux-4.1.26/include/linux/ft
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
-diff -Nur linux-4.1.26.orig/include/linux/highmem.h linux-4.1.26/include/linux/highmem.h
---- linux-4.1.26.orig/include/linux/highmem.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/highmem.h 2016-06-19 15:30:58.663296424 +0200
+diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
+index f9ecf63d47f1..2ef42aa7e484 100644
+--- a/include/linux/ftrace_event.h
++++ b/include/linux/ftrace_event.h
+@@ -66,6 +66,9 @@ struct trace_entry {
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
++ unsigned short migrate_disable;
++ unsigned short padding;
++ unsigned char preempt_lazy_count;
+ };
+
+ #define FTRACE_MAX_EVENT \
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 9286a46b7d69..06bae5a6761d 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
@@ -10053,7 +11077,7 @@ diff -Nur linux-4.1.26.orig/include/linux/highmem.h linux-4.1.26/include/linux/h
#include <asm/cacheflush.h>
-@@ -65,6 +66,7 @@
+@@ -65,6 +66,7 @@ static inline void kunmap(struct page *page)
static inline void *kmap_atomic(struct page *page)
{
@@ -10061,7 +11085,7 @@ diff -Nur linux-4.1.26.orig/include/linux/highmem.h linux-4.1.26/include/linux/h
pagefault_disable();
return page_address(page);
}
-@@ -73,6 +75,7 @@
+@@ -73,6 +75,7 @@ static inline void *kmap_atomic(struct page *page)
static inline void __kunmap_atomic(void *addr)
{
pagefault_enable();
@@ -10069,7 +11093,7 @@ diff -Nur linux-4.1.26.orig/include/linux/highmem.h linux-4.1.26/include/linux/h
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
-@@ -85,32 +88,51 @@
+@@ -85,32 +88,51 @@ static inline void __kunmap_atomic(void *addr)
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
@@ -10125,10 +11149,11 @@ diff -Nur linux-4.1.26.orig/include/linux/highmem.h linux-4.1.26/include/linux/h
#endif
}
-diff -Nur linux-4.1.26.orig/include/linux/hrtimer.h linux-4.1.26/include/linux/hrtimer.h
---- linux-4.1.26.orig/include/linux/hrtimer.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/hrtimer.h 2016-06-19 15:30:58.663296424 +0200
-@@ -111,6 +111,11 @@
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index 05f6df1fdf5b..64e1abb3715b 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -111,6 +111,11 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
@@ -10140,7 +11165,7 @@ diff -Nur linux-4.1.26.orig/include/linux/hrtimer.h linux-4.1.26/include/linux/h
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
-@@ -147,6 +152,7 @@
+@@ -147,6 +152,7 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
@@ -10148,7 +11173,7 @@ diff -Nur linux-4.1.26.orig/include/linux/hrtimer.h linux-4.1.26/include/linux/h
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t softirq_time;
-@@ -194,6 +200,9 @@
+@@ -194,6 +200,9 @@ struct hrtimer_cpu_base {
unsigned long nr_hangs;
ktime_t max_hang_time;
#endif
@@ -10158,7 +11183,7 @@ diff -Nur linux-4.1.26.orig/include/linux/hrtimer.h linux-4.1.26/include/linux/h
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
};
-@@ -381,6 +390,13 @@
+@@ -381,6 +390,13 @@ static inline int hrtimer_restart(struct hrtimer *timer)
return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
@@ -10172,10 +11197,11 @@ diff -Nur linux-4.1.26.orig/include/linux/hrtimer.h linux-4.1.26/include/linux/h
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
-diff -Nur linux-4.1.26.orig/include/linux/idr.h linux-4.1.26/include/linux/idr.h
---- linux-4.1.26.orig/include/linux/idr.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/idr.h 2016-06-19 15:30:58.663296424 +0200
-@@ -95,10 +95,14 @@
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index 013fd9bc4cb6..f62be0aec911 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
*/
@@ -10190,10 +11216,11 @@ diff -Nur linux-4.1.26.orig/include/linux/idr.h linux-4.1.26/include/linux/idr.h
/**
* idr_find - return pointer for given id
-diff -Nur linux-4.1.26.orig/include/linux/init_task.h linux-4.1.26/include/linux/init_task.h
---- linux-4.1.26.orig/include/linux/init_task.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/init_task.h 2016-06-19 15:30:58.667296578 +0200
-@@ -147,9 +147,16 @@
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index 696d22312b31..4a77d39ff7dd 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -147,9 +147,16 @@ extern struct task_group root_task_group;
# define INIT_PERF_EVENTS(tsk)
#endif
@@ -10211,7 +11238,7 @@ diff -Nur linux-4.1.26.orig/include/linux/init_task.h linux-4.1.26/include/linux
.vtime_snap = 0, \
.vtime_snap_whence = VTIME_SYS,
#else
-@@ -238,6 +245,7 @@
+@@ -238,6 +245,7 @@ extern struct task_group root_task_group;
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
@@ -10219,9 +11246,10 @@ diff -Nur linux-4.1.26.orig/include/linux/init_task.h linux-4.1.26/include/linux
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
-diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux/interrupt.h
---- linux-4.1.26.orig/include/linux/interrupt.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/interrupt.h 2016-06-19 15:30:58.667296578 +0200
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 950ae4501826..d11fd0a440ff 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
@@ -61,6 +61,7 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
@@ -10238,7 +11266,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
-@@ -102,6 +104,7 @@
+@@ -102,6 +104,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @flags: flags (see IRQF_* above)
* @thread_fn: interrupt handler function for threaded interrupts
* @thread: thread pointer for threaded interrupts
@@ -10246,7 +11274,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
* @thread_flags: flags related to @thread
* @thread_mask: bitmask for keeping track of @thread activity
* @dir: pointer to the proc/irq/NN/name entry
-@@ -113,6 +116,7 @@
+@@ -113,6 +116,7 @@ struct irqaction {
struct irqaction *next;
irq_handler_t thread_fn;
struct task_struct *thread;
@@ -10254,7 +11282,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
unsigned int irq;
unsigned int flags;
unsigned long thread_flags;
-@@ -184,7 +188,7 @@
+@@ -184,7 +188,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
@@ -10263,7 +11291,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
#endif
extern void disable_irq_nosync(unsigned int irq);
-@@ -215,6 +219,7 @@
+@@ -215,6 +219,7 @@ struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
struct work_struct work;
@@ -10271,7 +11299,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
void (*release)(struct kref *ref);
};
-@@ -377,9 +382,13 @@
+@@ -377,9 +382,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
@@ -10286,7 +11314,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
-@@ -435,9 +444,10 @@
+@@ -435,9 +444,10 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
@@ -10298,7 +11326,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
-@@ -446,13 +456,25 @@
+@@ -446,13 +456,25 @@ static inline void do_softirq_own_stack(void)
__do_softirq();
}
#endif
@@ -10324,7 +11352,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-@@ -474,8 +496,9 @@
+@@ -474,8 +496,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
@@ -10336,7 +11364,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
-@@ -500,27 +523,36 @@
+@@ -500,27 +523,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
@@ -10379,7 +11407,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
-@@ -569,12 +601,7 @@
+@@ -569,12 +601,7 @@ static inline void tasklet_disable(struct tasklet_struct *t)
smp_mb();
}
@@ -10393,7 +11421,7 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
-@@ -605,6 +632,12 @@
+@@ -605,6 +632,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
tasklet_kill(&ttimer->tasklet);
}
@@ -10406,10 +11434,11 @@ diff -Nur linux-4.1.26.orig/include/linux/interrupt.h linux-4.1.26/include/linux
/*
* Autoprobing for irqs:
*
-diff -Nur linux-4.1.26.orig/include/linux/io-mapping.h linux-4.1.26/include/linux/io-mapping.h
---- linux-4.1.26.orig/include/linux/io-mapping.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/io-mapping.h 2016-06-19 15:30:58.667296578 +0200
-@@ -141,6 +141,7 @@
+diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
+index 657fab4efab3..c27dde7215b5 100644
+--- a/include/linux/io-mapping.h
++++ b/include/linux/io-mapping.h
+@@ -141,6 +141,7 @@ static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
@@ -10417,7 +11446,7 @@ diff -Nur linux-4.1.26.orig/include/linux/io-mapping.h linux-4.1.26/include/linu
pagefault_disable();
return ((char __force __iomem *) mapping) + offset;
}
-@@ -149,6 +150,7 @@
+@@ -149,6 +150,7 @@ static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
pagefault_enable();
@@ -10425,10 +11454,62 @@ diff -Nur linux-4.1.26.orig/include/linux/io-mapping.h linux-4.1.26/include/linu
}
/* Non-atomic map/unmap */
-diff -Nur linux-4.1.26.orig/include/linux/irqdesc.h linux-4.1.26/include/linux/irqdesc.h
---- linux-4.1.26.orig/include/linux/irqdesc.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/irqdesc.h 2016-06-19 15:30:58.667296578 +0200
-@@ -63,6 +63,7 @@
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index 33475a37f1bb..00e8834d2e02 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -72,6 +72,7 @@ enum irqchip_irq_state;
+ * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
+ * it from the spurious interrupt detection
+ * mechanism and from core side polling.
++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
+ */
+ enum {
+ IRQ_TYPE_NONE = 0x00000000,
+@@ -97,13 +98,14 @@ enum {
+ IRQ_NOTHREAD = (1 << 16),
+ IRQ_PER_CPU_DEVID = (1 << 17),
+ IRQ_IS_POLLED = (1 << 18),
++ IRQ_NO_SOFTIRQ_CALL = (1 << 19),
+ };
+
+ #define IRQF_MODIFY_MASK \
+ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
+- IRQ_IS_POLLED)
++ IRQ_IS_POLLED | IRQ_NO_SOFTIRQ_CALL)
+
+ #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
+
+diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
+index 47b9ebd4a74f..2543aab05daa 100644
+--- a/include/linux/irq_work.h
++++ b/include/linux/irq_work.h
+@@ -16,6 +16,7 @@
+ #define IRQ_WORK_BUSY 2UL
+ #define IRQ_WORK_FLAGS 3UL
+ #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
++#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
+
+ struct irq_work {
+ unsigned long flags;
+@@ -51,4 +52,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
+ static inline void irq_work_run(void) { }
+ #endif
+
++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
++void irq_work_tick_soft(void);
++#else
++static inline void irq_work_tick_soft(void) { }
++#endif
++
+ #endif /* _LINUX_IRQ_WORK_H */
+diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
+index dd1109fb241e..9d97cd5bb7c7 100644
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -63,6 +63,7 @@ struct irq_desc {
unsigned int irqs_unhandled;
atomic_t threads_handled;
int threads_handled_last;
@@ -10436,9 +11517,10 @@ diff -Nur linux-4.1.26.orig/include/linux/irqdesc.h linux-4.1.26/include/linux/i
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
-diff -Nur linux-4.1.26.orig/include/linux/irqflags.h linux-4.1.26/include/linux/irqflags.h
---- linux-4.1.26.orig/include/linux/irqflags.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/irqflags.h 2016-06-19 15:30:58.667296578 +0200
+diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
+index 5dd1272d1ab2..9b77034f7c5e 100644
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
@@ -25,8 +25,6 @@
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
@@ -10489,59 +11571,11 @@ diff -Nur linux-4.1.26.orig/include/linux/irqflags.h linux-4.1.26/include/linux/
+#endif
+
#endif
-diff -Nur linux-4.1.26.orig/include/linux/irq.h linux-4.1.26/include/linux/irq.h
---- linux-4.1.26.orig/include/linux/irq.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/irq.h 2016-06-19 15:30:58.667296578 +0200
-@@ -72,6 +72,7 @@
- * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
- * it from the spurious interrupt detection
- * mechanism and from core side polling.
-+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
- */
- enum {
- IRQ_TYPE_NONE = 0x00000000,
-@@ -97,13 +98,14 @@
- IRQ_NOTHREAD = (1 << 16),
- IRQ_PER_CPU_DEVID = (1 << 17),
- IRQ_IS_POLLED = (1 << 18),
-+ IRQ_NO_SOFTIRQ_CALL = (1 << 19),
- };
-
- #define IRQF_MODIFY_MASK \
- (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
- IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-- IRQ_IS_POLLED)
-+ IRQ_IS_POLLED | IRQ_NO_SOFTIRQ_CALL)
-
- #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-
-diff -Nur linux-4.1.26.orig/include/linux/irq_work.h linux-4.1.26/include/linux/irq_work.h
---- linux-4.1.26.orig/include/linux/irq_work.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/irq_work.h 2016-06-19 15:30:58.667296578 +0200
-@@ -16,6 +16,7 @@
- #define IRQ_WORK_BUSY 2UL
- #define IRQ_WORK_FLAGS 3UL
- #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
-+#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
-
- struct irq_work {
- unsigned long flags;
-@@ -51,4 +52,10 @@
- static inline void irq_work_run(void) { }
- #endif
-
-+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-+void irq_work_tick_soft(void);
-+#else
-+static inline void irq_work_tick_soft(void) { }
-+#endif
-+
- #endif /* _LINUX_IRQ_WORK_H */
-diff -Nur linux-4.1.26.orig/include/linux/jbd_common.h linux-4.1.26/include/linux/jbd_common.h
---- linux-4.1.26.orig/include/linux/jbd_common.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/jbd_common.h 2016-06-19 15:30:58.667296578 +0200
-@@ -15,32 +15,56 @@
+diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
+index 3dc53432355f..a90a6f5ca899 100644
+--- a/include/linux/jbd_common.h
++++ b/include/linux/jbd_common.h
+@@ -15,32 +15,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
@@ -10598,10 +11632,11 @@ diff -Nur linux-4.1.26.orig/include/linux/jbd_common.h linux-4.1.26/include/linu
}
#endif
-diff -Nur linux-4.1.26.orig/include/linux/kdb.h linux-4.1.26/include/linux/kdb.h
---- linux-4.1.26.orig/include/linux/kdb.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/kdb.h 2016-06-19 15:30:58.667296578 +0200
-@@ -167,6 +167,7 @@
+diff --git a/include/linux/kdb.h b/include/linux/kdb.h
+index a19bcf9e762e..897495386446 100644
+--- a/include/linux/kdb.h
++++ b/include/linux/kdb.h
+@@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
extern __printf(1, 2) int kdb_printf(const char *, ...);
typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
@@ -10609,7 +11644,7 @@ diff -Nur linux-4.1.26.orig/include/linux/kdb.h linux-4.1.26/include/linux/kdb.h
extern void kdb_init(int level);
/* Access to kdb specific polling devices */
-@@ -201,6 +202,7 @@
+@@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
@@ -10617,10 +11652,11 @@ diff -Nur linux-4.1.26.orig/include/linux/kdb.h linux-4.1.26/include/linux/kdb.h
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
-diff -Nur linux-4.1.26.orig/include/linux/kernel.h linux-4.1.26/include/linux/kernel.h
---- linux-4.1.26.orig/include/linux/kernel.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/kernel.h 2016-06-19 15:30:58.667296578 +0200
-@@ -188,6 +188,9 @@
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index d837f2a41665..2f4ce318c4fb 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -188,6 +188,9 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
@@ -10630,7 +11666,7 @@ diff -Nur linux-4.1.26.orig/include/linux/kernel.h linux-4.1.26/include/linux/ke
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
-@@ -195,6 +198,7 @@
+@@ -195,6 +198,7 @@ extern int _cond_resched(void);
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
@@ -10638,7 +11674,7 @@ diff -Nur linux-4.1.26.orig/include/linux/kernel.h linux-4.1.26/include/linux/ke
# define sched_annotate_sleep() do { } while (0)
#endif
-@@ -244,7 +248,8 @@
+@@ -244,7 +248,8 @@ static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
@@ -10648,7 +11684,7 @@ diff -Nur linux-4.1.26.orig/include/linux/kernel.h linux-4.1.26/include/linux/ke
#else
static inline void might_fault(void) { }
#endif
-@@ -466,6 +471,7 @@
+@@ -466,6 +471,7 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
@@ -10656,10 +11692,11 @@ diff -Nur linux-4.1.26.orig/include/linux/kernel.h linux-4.1.26/include/linux/ke
} system_state;
#define TAINT_PROPRIETARY_MODULE 0
-diff -Nur linux-4.1.26.orig/include/linux/kvm_host.h linux-4.1.26/include/linux/kvm_host.h
---- linux-4.1.26.orig/include/linux/kvm_host.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/kvm_host.h 2016-06-19 15:30:58.667296578 +0200
-@@ -230,7 +230,7 @@
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 29a57a5b7cee..a081a0316379 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -230,7 +230,7 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
@@ -10668,7 +11705,7 @@ diff -Nur linux-4.1.26.orig/include/linux/kvm_host.h linux-4.1.26/include/linux/
struct pid *pid;
int sigset_active;
sigset_t sigset;
-@@ -701,7 +701,7 @@
+@@ -701,7 +701,7 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
}
#endif
@@ -10677,9 +11714,10 @@ diff -Nur linux-4.1.26.orig/include/linux/kvm_host.h linux-4.1.26/include/linux/
{
#ifdef __KVM_HAVE_ARCH_WQP
return vcpu->arch.wqp;
-diff -Nur linux-4.1.26.orig/include/linux/lglock.h linux-4.1.26/include/linux/lglock.h
---- linux-4.1.26.orig/include/linux/lglock.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/lglock.h 2016-06-19 15:30:58.667296578 +0200
+diff --git a/include/linux/lglock.h b/include/linux/lglock.h
+index 0081f000e34b..9603a1500267 100644
+--- a/include/linux/lglock.h
++++ b/include/linux/lglock.h
@@ -34,22 +34,39 @@
#endif
@@ -10722,7 +11760,7 @@ diff -Nur linux-4.1.26.orig/include/linux/lglock.h linux-4.1.26/include/linux/lg
void lg_lock_init(struct lglock *lg, char *name);
void lg_local_lock(struct lglock *lg);
-@@ -59,6 +76,12 @@
+@@ -59,6 +76,12 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu);
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);
@@ -10735,9 +11773,10 @@ diff -Nur linux-4.1.26.orig/include/linux/lglock.h linux-4.1.26/include/linux/lg
#else
/* When !CONFIG_SMP, map lglock to spinlock */
#define lglock spinlock
-diff -Nur linux-4.1.26.orig/include/linux/list_bl.h linux-4.1.26/include/linux/list_bl.h
---- linux-4.1.26.orig/include/linux/list_bl.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/list_bl.h 2016-06-19 15:30:58.667296578 +0200
+diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
+index 2eb88556c5c5..017d0f1c1eb4 100644
+--- a/include/linux/list_bl.h
++++ b/include/linux/list_bl.h
@@ -2,6 +2,7 @@
#define _LINUX_LIST_BL_H
@@ -10746,7 +11785,7 @@ diff -Nur linux-4.1.26.orig/include/linux/list_bl.h linux-4.1.26/include/linux/l
#include <linux/bit_spinlock.h>
/*
-@@ -32,13 +33,22 @@
+@@ -32,13 +33,24 @@
struct hlist_bl_head {
struct hlist_bl_node *first;
@@ -10761,17 +11800,19 @@ diff -Nur linux-4.1.26.orig/include/linux/list_bl.h linux-4.1.26/include/linux/l
-#define INIT_HLIST_BL_HEAD(ptr) \
- ((ptr)->first = NULL)
+
-+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
-+{
-+ h->first = NULL;
+#ifdef CONFIG_PREEMPT_RT_BASE
-+ raw_spin_lock_init(&h->lock);
++#define INIT_HLIST_BL_HEAD(h) \
++do { \
++ (h)->first = NULL; \
++ raw_spin_lock_init(&(h)->lock); \
++} while (0)
++#else
++#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL
+#endif
-+}
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
-@@ -117,12 +127,26 @@
+@@ -117,12 +129,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
@@ -10798,10 +11839,12 @@ diff -Nur linux-4.1.26.orig/include/linux/list_bl.h linux-4.1.26/include/linux/l
}
static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
-diff -Nur linux-4.1.26.orig/include/linux/locallock.h linux-4.1.26/include/linux/locallock.h
---- linux-4.1.26.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/locallock.h 2016-06-19 15:30:58.667296578 +0200
-@@ -0,0 +1,270 @@
+diff --git a/include/linux/locallock.h b/include/linux/locallock.h
+new file mode 100644
+index 000000000000..0edbf192f6d1
+--- /dev/null
++++ b/include/linux/locallock.h
+@@ -0,0 +1,276 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
+
@@ -10870,6 +11913,9 @@ diff -Nur linux-4.1.26.orig/include/linux/locallock.h linux-4.1.26/include/linux
+#define local_lock(lvar) \
+ do { __local_lock(&get_local_var(lvar)); } while (0)
+
++#define local_lock_on(lvar, cpu) \
++ do { __local_lock(&per_cpu(lvar, cpu)); } while (0)
++
+static inline int __local_trylock(struct local_irq_lock *lv)
+{
+ if (lv->owner != current && spin_trylock_local(&lv->lock)) {
@@ -10908,6 +11954,9 @@ diff -Nur linux-4.1.26.orig/include/linux/locallock.h linux-4.1.26/include/linux
+ put_local_var(lvar); \
+ } while (0)
+
++#define local_unlock_on(lvar, cpu) \
++ do { __local_unlock(&per_cpu(lvar, cpu)); } while (0)
++
+static inline void __local_lock_irq(struct local_irq_lock *lv)
+{
+ spin_lock_irqsave(&lv->lock, lv->flags);
@@ -11072,9 +12121,10 @@ diff -Nur linux-4.1.26.orig/include/linux/locallock.h linux-4.1.26/include/linux
+#endif
+
+#endif
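/*
 * Usage sketch (editor's illustration, not part of the patch): the
 * locallock.h file added above protects per-CPU data with a per-CPU
 * "local lock". On PREEMPT_RT the lock member of struct local_irq_lock
 * is a sleeping spinlock, so the critical section stays preemptible;
 * on !RT builds the same macros collapse to the usual preempt/irq
 * disabling. DEFINE_LOCAL_IRQ_LOCK comes from a part of the header
 * elided in this hunk; the lock and counter names are hypothetical.
 */
#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(stats_lock);
static DEFINE_PER_CPU(unsigned long, stats_count);

static void count_event(void)
{
	local_lock(stats_lock);		/* this CPU's lock; task stays pinned */
	__this_cpu_inc(stats_count);
	local_unlock(stats_lock);
}

static void count_event_on(int cpu)
{
	/* the *_on() variants are new in this revision of the patch */
	local_lock_on(stats_lock, cpu);
	per_cpu(stats_count, cpu)++;
	local_unlock_on(stats_lock, cpu);
}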
-diff -Nur linux-4.1.26.orig/include/linux/mm_types.h linux-4.1.26/include/linux/mm_types.h
---- linux-4.1.26.orig/include/linux/mm_types.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/mm_types.h 2016-06-19 15:30:58.667296578 +0200
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index c0c6b33535fb..89c047144b1f 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
@@ -11,6 +11,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -11083,7 +12133,7 @@ diff -Nur linux-4.1.26.orig/include/linux/mm_types.h linux-4.1.26/include/linux/
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>
-@@ -453,6 +454,9 @@
+@@ -453,6 +454,9 @@ struct mm_struct {
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
@@ -11093,9 +12143,10 @@ diff -Nur linux-4.1.26.orig/include/linux/mm_types.h linux-4.1.26/include/linux/
#ifdef CONFIG_X86_INTEL_MPX
/* address of the bounds directory */
void __user *bd_addr;
-diff -Nur linux-4.1.26.orig/include/linux/mutex.h linux-4.1.26/include/linux/mutex.h
---- linux-4.1.26.orig/include/linux/mutex.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/mutex.h 2016-06-19 15:30:58.671296732 +0200
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 2cb7531e7d7a..b3fdfc820216 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
@@ -19,6 +19,17 @@
#include <asm/processor.h>
#include <linux/osq_lock.h>
@@ -11114,7 +12165,7 @@ diff -Nur linux-4.1.26.orig/include/linux/mutex.h linux-4.1.26/include/linux/mut
/*
* Simple, straightforward mutexes with strict semantics:
*
-@@ -99,13 +110,6 @@
+@@ -99,13 +110,6 @@ do { \
static inline void mutex_destroy(struct mutex *lock) {}
#endif
@@ -11128,7 +12179,7 @@ diff -Nur linux-4.1.26.orig/include/linux/mutex.h linux-4.1.26/include/linux/mut
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
-@@ -173,6 +177,8 @@
+@@ -173,6 +177,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
@@ -11137,9 +12188,11 @@ diff -Nur linux-4.1.26.orig/include/linux/mutex.h linux-4.1.26/include/linux/mut
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#endif /* __LINUX_MUTEX_H */
-diff -Nur linux-4.1.26.orig/include/linux/mutex_rt.h linux-4.1.26/include/linux/mutex_rt.h
---- linux-4.1.26.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/mutex_rt.h 2016-06-19 15:30:58.671296732 +0200
+diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
+new file mode 100644
+index 000000000000..c38a44b14da5
+--- /dev/null
++++ b/include/linux/mutex_rt.h
@@ -0,0 +1,84 @@
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
@@ -11225,10 +12278,11 @@ diff -Nur linux-4.1.26.orig/include/linux/mutex_rt.h linux-4.1.26/include/linux/
+} while (0)
+
+#endif
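/*
 * Usage sketch (editor's illustration, not part of the patch): with
 * CONFIG_PREEMPT_RT_FULL the mutex_rt.h header above reimplements the
 * mutex API on top of rt_mutex, so existing callers compile unchanged
 * and gain priority inheritance. The names below are hypothetical.
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(cfg_mutex);
static int cfg_value;

static void cfg_set(int v)
{
	mutex_lock(&cfg_mutex);		/* rt_mutex underneath on RT kernels */
	cfg_value = v;
	mutex_unlock(&cfg_mutex);
}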
-diff -Nur linux-4.1.26.orig/include/linux/netdevice.h linux-4.1.26/include/linux/netdevice.h
---- linux-4.1.26.orig/include/linux/netdevice.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/netdevice.h 2016-06-19 15:30:58.671296732 +0200
-@@ -2192,11 +2192,20 @@
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 6c86c7edafa7..27b25e97c3d4 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2193,11 +2193,20 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
@@ -11249,7 +12303,7 @@ diff -Nur linux-4.1.26.orig/include/linux/netdevice.h linux-4.1.26/include/linux
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
-@@ -2469,6 +2478,7 @@
+@@ -2488,6 +2497,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -11257,9 +12311,10 @@ diff -Nur linux-4.1.26.orig/include/linux/netdevice.h linux-4.1.26/include/linux
};
-diff -Nur linux-4.1.26.orig/include/linux/netfilter/x_tables.h linux-4.1.26/include/linux/netfilter/x_tables.h
---- linux-4.1.26.orig/include/linux/netfilter/x_tables.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/netfilter/x_tables.h 2016-06-19 15:30:58.671296732 +0200
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index cc615e273f80..1a6ba6d7ff8b 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
@@ -3,6 +3,7 @@
@@ -11268,7 +12323,7 @@ diff -Nur linux-4.1.26.orig/include/linux/netfilter/x_tables.h linux-4.1.26/incl
#include <uapi/linux/netfilter/x_tables.h>
/**
-@@ -282,6 +283,8 @@
+@@ -293,6 +294,8 @@ void xt_free_table_info(struct xt_table_info *info);
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
@@ -11277,7 +12332,7 @@ diff -Nur linux-4.1.26.orig/include/linux/netfilter/x_tables.h linux-4.1.26/incl
/**
* xt_write_recseq_begin - start of a write section
*
-@@ -296,6 +299,9 @@
+@@ -307,6 +310,9 @@ static inline unsigned int xt_write_recseq_begin(void)
{
unsigned int addend;
@@ -11287,7 +12342,7 @@ diff -Nur linux-4.1.26.orig/include/linux/netfilter/x_tables.h linux-4.1.26/incl
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
-@@ -326,6 +332,7 @@
+@@ -337,6 +343,7 @@ static inline void xt_write_recseq_end(unsigned int addend)
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
@@ -11295,9 +12350,10 @@ diff -Nur linux-4.1.26.orig/include/linux/netfilter/x_tables.h linux-4.1.26/incl
}
/*
-diff -Nur linux-4.1.26.orig/include/linux/notifier.h linux-4.1.26/include/linux/notifier.h
---- linux-4.1.26.orig/include/linux/notifier.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/notifier.h 2016-06-19 15:30:58.671296732 +0200
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index d14a4c362465..2e4414a0c1c4 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
@@ -6,7 +6,7 @@
*
* Alan Cox <Alan.Cox@linux.org>
@@ -11318,7 +12374,7 @@ diff -Nur linux-4.1.26.orig/include/linux/notifier.h linux-4.1.26/include/linux/
*/
typedef int (*notifier_fn_t)(struct notifier_block *nb,
-@@ -88,7 +86,7 @@
+@@ -88,7 +86,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
@@ -11327,7 +12383,7 @@ diff -Nur linux-4.1.26.orig/include/linux/notifier.h linux-4.1.26/include/linux/
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
-@@ -101,7 +99,13 @@
+@@ -101,7 +99,13 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
@@ -11342,7 +12398,7 @@ diff -Nur linux-4.1.26.orig/include/linux/notifier.h linux-4.1.26/include/linux/
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
-@@ -113,6 +117,18 @@
+@@ -113,6 +117,18 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
@@ -11361,7 +12417,7 @@ diff -Nur linux-4.1.26.orig/include/linux/notifier.h linux-4.1.26/include/linux/
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
-@@ -182,12 +198,12 @@
+@@ -182,12 +198,12 @@ static inline int notifier_to_errno(int ret)
/*
* Declared notifiers so far. I can imagine quite a few more chains
@@ -11377,9 +12433,10 @@ diff -Nur linux-4.1.26.orig/include/linux/notifier.h linux-4.1.26/include/linux/
/* CPU notfiers are defined in include/linux/cpu.h. */
/* netdevice notifiers are defined in include/linux/netdevice.h */
-diff -Nur linux-4.1.26.orig/include/linux/percpu.h linux-4.1.26/include/linux/percpu.h
---- linux-4.1.26.orig/include/linux/percpu.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/percpu.h 2016-06-19 15:30:58.671296732 +0200
+diff --git a/include/linux/percpu.h b/include/linux/percpu.h
+index caebf2a758dc..53a60a51c758 100644
+--- a/include/linux/percpu.h
++++ b/include/linux/percpu.h
@@ -24,6 +24,35 @@
PERCPU_MODULE_RESERVE)
#endif
@@ -11416,9 +12473,10 @@ diff -Nur linux-4.1.26.orig/include/linux/percpu.h linux-4.1.26/include/linux/pe
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
-diff -Nur linux-4.1.26.orig/include/linux/pid.h linux-4.1.26/include/linux/pid.h
---- linux-4.1.26.orig/include/linux/pid.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/pid.h 2016-06-19 15:30:58.671296732 +0200
+diff --git a/include/linux/pid.h b/include/linux/pid.h
+index 23705a53abba..2cc64b779f03 100644
+--- a/include/linux/pid.h
++++ b/include/linux/pid.h
@@ -2,6 +2,7 @@
#define _LINUX_PID_H
@@ -11427,10 +12485,11 @@ diff -Nur linux-4.1.26.orig/include/linux/pid.h linux-4.1.26/include/linux/pid.h
enum pid_type
{
-diff -Nur linux-4.1.26.orig/include/linux/platform_data/gpio-omap.h linux-4.1.26/include/linux/platform_data/gpio-omap.h
---- linux-4.1.26.orig/include/linux/platform_data/gpio-omap.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/platform_data/gpio-omap.h 2016-06-19 15:30:58.671296732 +0200
-@@ -198,7 +198,6 @@
+diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
+index 5d50b25a73d7..ff43e01b8ca9 100644
+--- a/include/linux/platform_data/gpio-omap.h
++++ b/include/linux/platform_data/gpio-omap.h
+@@ -198,7 +198,6 @@ struct omap_gpio_platform_data {
int bank_width; /* GPIO bank width */
int bank_stride; /* Only needed for omap1 MPUIO */
bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
@@ -11438,7 +12497,7 @@ diff -Nur linux-4.1.26.orig/include/linux/platform_data/gpio-omap.h linux-4.1.26
bool is_mpuio; /* whether the bank is of type MPUIO */
u32 non_wakeup_gpios;
-@@ -208,9 +207,17 @@
+@@ -208,9 +207,17 @@ struct omap_gpio_platform_data {
int (*get_context_loss_count)(struct device *dev);
};
@@ -11458,10 +12517,11 @@ diff -Nur linux-4.1.26.orig/include/linux/platform_data/gpio-omap.h linux-4.1.26
+#endif
#endif
-diff -Nur linux-4.1.26.orig/include/linux/preempt.h linux-4.1.26/include/linux/preempt.h
---- linux-4.1.26.orig/include/linux/preempt.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/preempt.h 2016-06-19 15:30:58.671296732 +0200
-@@ -34,6 +34,20 @@
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 8cd6725c5758..8fa1d21dab70 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -34,6 +34,20 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
@@ -11482,7 +12542,7 @@ diff -Nur linux-4.1.26.orig/include/linux/preempt.h linux-4.1.26/include/linux/p
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
-@@ -42,13 +56,25 @@
+@@ -42,13 +56,25 @@ do { \
barrier(); \
} while (0)
@@ -11509,7 +12569,7 @@ diff -Nur linux-4.1.26.orig/include/linux/preempt.h linux-4.1.26/include/linux/p
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
-@@ -64,6 +90,13 @@
+@@ -64,6 +90,13 @@ do { \
__preempt_schedule(); \
} while (0)
@@ -11523,7 +12583,7 @@ diff -Nur linux-4.1.26.orig/include/linux/preempt.h linux-4.1.26/include/linux/p
#else
#define preempt_enable() \
do { \
-@@ -122,6 +155,7 @@
+@@ -122,6 +155,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
@@ -11531,7 +12591,7 @@ diff -Nur linux-4.1.26.orig/include/linux/preempt.h linux-4.1.26/include/linux/p
#endif /* CONFIG_PREEMPT_COUNT */
-@@ -141,10 +175,31 @@
+@@ -141,10 +175,31 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
@@ -11564,9 +12624,10 @@ diff -Nur linux-4.1.26.orig/include/linux/preempt.h linux-4.1.26/include/linux/p
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
-diff -Nur linux-4.1.26.orig/include/linux/preempt_mask.h linux-4.1.26/include/linux/preempt_mask.h
---- linux-4.1.26.orig/include/linux/preempt_mask.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/preempt_mask.h 2016-06-19 15:30:58.671296732 +0200
+diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
+index 5cb25f17331a..26a33802dae2 100644
+--- a/include/linux/preempt_mask.h
++++ b/include/linux/preempt_mask.h
@@ -44,16 +44,26 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
@@ -11616,10 +12677,11 @@ diff -Nur linux-4.1.26.orig/include/linux/preempt_mask.h linux-4.1.26/include/li
/*
* The preempt_count offset needed for things like:
-diff -Nur linux-4.1.26.orig/include/linux/printk.h linux-4.1.26/include/linux/printk.h
---- linux-4.1.26.orig/include/linux/printk.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/printk.h 2016-06-19 15:30:58.671296732 +0200
-@@ -115,9 +115,11 @@
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 9b30871c9149..08d0a7574fcf 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -115,9 +115,11 @@ int no_printk(const char *fmt, ...)
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
@@ -11631,10 +12693,11 @@ diff -Nur linux-4.1.26.orig/include/linux/printk.h linux-4.1.26/include/linux/pr
#endif
typedef int(*printk_func_t)(const char *fmt, va_list args);
-diff -Nur linux-4.1.26.orig/include/linux/radix-tree.h linux-4.1.26/include/linux/radix-tree.h
---- linux-4.1.26.orig/include/linux/radix-tree.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/radix-tree.h 2016-06-19 15:30:58.671296732 +0200
-@@ -277,8 +277,13 @@
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 673dee29a9b9..9a80663a1574 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -277,8 +277,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
@@ -11648,7 +12711,7 @@ diff -Nur linux-4.1.26.orig/include/linux/radix-tree.h linux-4.1.26/include/linu
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
-@@ -303,7 +308,7 @@
+@@ -303,7 +308,7 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
static inline void radix_tree_preload_end(void)
{
@@ -11657,9 +12720,10 @@ diff -Nur linux-4.1.26.orig/include/linux/radix-tree.h linux-4.1.26/include/linu
}
/**
-diff -Nur linux-4.1.26.orig/include/linux/random.h linux-4.1.26/include/linux/random.h
---- linux-4.1.26.orig/include/linux/random.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/random.h 2016-06-19 15:30:58.671296732 +0200
+diff --git a/include/linux/random.h b/include/linux/random.h
+index b05856e16b75..4a64ad52dcb7 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
@@ -11,7 +11,7 @@
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
@@ -11669,10 +12733,11 @@ diff -Nur linux-4.1.26.orig/include/linux/random.h linux-4.1.26/include/linux/ra
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
-diff -Nur linux-4.1.26.orig/include/linux/rcupdate.h linux-4.1.26/include/linux/rcupdate.h
---- linux-4.1.26.orig/include/linux/rcupdate.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/rcupdate.h 2016-06-19 15:30:58.671296732 +0200
-@@ -167,6 +167,9 @@
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 573a5afd5ed8..5d090cdaaace 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -167,6 +167,9 @@ void call_rcu(struct rcu_head *head,
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -11682,7 +12747,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rcupdate.h linux-4.1.26/include/linux/
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
-@@ -190,6 +193,7 @@
+@@ -190,6 +193,7 @@ void call_rcu(struct rcu_head *head,
*/
void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
@@ -11690,7 +12755,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rcupdate.h linux-4.1.26/include/linux/
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
-@@ -260,6 +264,11 @@
+@@ -260,6 +264,11 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
@@ -11702,7 +12767,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rcupdate.h linux-4.1.26/include/linux/
#else /* #ifdef CONFIG_PREEMPT_RCU */
-@@ -283,6 +292,8 @@
+@@ -283,6 +292,8 @@ static inline int rcu_preempt_depth(void)
return 0;
}
@@ -11711,7 +12776,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rcupdate.h linux-4.1.26/include/linux/
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
-@@ -463,7 +474,14 @@
+@@ -463,7 +474,14 @@ extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
@@ -11726,7 +12791,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rcupdate.h linux-4.1.26/include/linux/
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
-@@ -990,10 +1008,14 @@
+@@ -990,10 +1008,14 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
@@ -11741,7 +12806,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rcupdate.h linux-4.1.26/include/linux/
}
/*
-@@ -1003,10 +1025,14 @@
+@@ -1003,10 +1025,14 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
@@ -11756,10 +12821,11 @@ diff -Nur linux-4.1.26.orig/include/linux/rcupdate.h linux-4.1.26/include/linux/
local_bh_enable();
}
-diff -Nur linux-4.1.26.orig/include/linux/rcutree.h linux-4.1.26/include/linux/rcutree.h
---- linux-4.1.26.orig/include/linux/rcutree.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/rcutree.h 2016-06-19 15:30:58.671296732 +0200
-@@ -46,7 +46,11 @@
+diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
+index d2e583a6aaca..0b350893b46a 100644
+--- a/include/linux/rcutree.h
++++ b/include/linux/rcutree.h
+@@ -46,7 +46,11 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch();
}
@@ -11771,7 +12837,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rcutree.h linux-4.1.26/include/linux/r
void synchronize_sched_expedited(void);
void synchronize_rcu_expedited(void);
-@@ -74,7 +78,11 @@
+@@ -74,7 +78,11 @@ static inline void synchronize_rcu_bh_expedited(void)
}
void rcu_barrier(void);
@@ -11783,7 +12849,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rcutree.h linux-4.1.26/include/linux/r
void rcu_barrier_sched(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
-@@ -85,12 +93,10 @@
+@@ -85,12 +93,10 @@ unsigned long rcu_batches_started(void);
unsigned long rcu_batches_started_bh(void);
unsigned long rcu_batches_started_sched(void);
unsigned long rcu_batches_completed(void);
@@ -11796,7 +12862,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rcutree.h linux-4.1.26/include/linux/r
void rcu_sched_force_quiescent_state(void);
void exit_rcu(void);
-@@ -100,6 +106,14 @@
+@@ -100,6 +106,14 @@ extern int rcu_scheduler_active __read_mostly;
bool rcu_is_watching(void);
@@ -11811,9 +12877,10 @@ diff -Nur linux-4.1.26.orig/include/linux/rcutree.h linux-4.1.26/include/linux/r
void rcu_all_qs(void);
#endif /* __LINUX_RCUTREE_H */
-diff -Nur linux-4.1.26.orig/include/linux/rtmutex.h linux-4.1.26/include/linux/rtmutex.h
---- linux-4.1.26.orig/include/linux/rtmutex.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/rtmutex.h 2016-06-19 15:30:58.675296887 +0200
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 1abba5ce2a2f..d5a04ea47a13 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
@@ -14,10 +14,14 @@
#include <linux/linkage.h>
@@ -11830,7 +12897,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rtmutex.h linux-4.1.26/include/linux/r
/**
* The rt_mutex structure
*
-@@ -31,8 +35,8 @@
+@@ -31,8 +35,8 @@ struct rt_mutex {
struct rb_root waiters;
struct rb_node *waiters_leftmost;
struct task_struct *owner;
@@ -11840,7 +12907,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rtmutex.h linux-4.1.26/include/linux/r
const char *name, *file;
int line;
void *magic;
-@@ -55,22 +59,33 @@
+@@ -55,22 +59,33 @@ struct hrtimer_sleeper;
# define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
#endif
@@ -11879,7 +12946,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rtmutex.h linux-4.1.26/include/linux/r
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
-@@ -91,6 +106,7 @@
+@@ -91,6 +106,7 @@ extern void rt_mutex_destroy(struct rt_mutex *lock);
extern void rt_mutex_lock(struct rt_mutex *lock);
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
@@ -11887,9 +12954,11 @@ diff -Nur linux-4.1.26.orig/include/linux/rtmutex.h linux-4.1.26/include/linux/r
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout);
-diff -Nur linux-4.1.26.orig/include/linux/rwlock_rt.h linux-4.1.26/include/linux/rwlock_rt.h
---- linux-4.1.26.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/rwlock_rt.h 2016-06-19 15:30:58.675296887 +0200
+diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
+new file mode 100644
+index 000000000000..49ed2d45d3be
+--- /dev/null
++++ b/include/linux/rwlock_rt.h
@@ -0,0 +1,99 @@
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
@@ -11990,9 +13059,10 @@ diff -Nur linux-4.1.26.orig/include/linux/rwlock_rt.h linux-4.1.26/include/linux
+ } while (0)
+
+#endif
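/*
 * Usage sketch (editor's illustration, not part of the patch):
 * rwlock_rt.h maps rwlock_t onto rt_mutex as well, turning both the
 * read side and the write side into preemptible sleeping locks while
 * the client API stays identical. On RT the read side is recursive
 * only for the task that already holds the lock, not concurrently
 * shared across CPUs. The names below are hypothetical.
 */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(table_lock);
static int table[16];

static int table_get(int i)
{
	int v;

	read_lock(&table_lock);
	v = table[i];
	read_unlock(&table_lock);
	return v;
}

static void table_set(int i, int v)
{
	write_lock(&table_lock);
	table[i] = v;
	write_unlock(&table_lock);
}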
-diff -Nur linux-4.1.26.orig/include/linux/rwlock_types.h linux-4.1.26/include/linux/rwlock_types.h
---- linux-4.1.26.orig/include/linux/rwlock_types.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/rwlock_types.h 2016-06-19 15:30:58.675296887 +0200
+diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
+index cc0072e93e36..d0da966ad7a0 100644
+--- a/include/linux/rwlock_types.h
++++ b/include/linux/rwlock_types.h
@@ -1,6 +1,10 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
@@ -12004,7 +13074,7 @@ diff -Nur linux-4.1.26.orig/include/linux/rwlock_types.h linux-4.1.26/include/li
/*
* include/linux/rwlock_types.h - generic rwlock type definitions
* and initializers
-@@ -43,6 +47,7 @@
+@@ -43,6 +47,7 @@ typedef struct {
RW_DEP_MAP_INIT(lockname) }
#endif
@@ -12013,9 +13083,11 @@ diff -Nur linux-4.1.26.orig/include/linux/rwlock_types.h linux-4.1.26/include/li
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
#endif /* __LINUX_RWLOCK_TYPES_H */
-diff -Nur linux-4.1.26.orig/include/linux/rwlock_types_rt.h linux-4.1.26/include/linux/rwlock_types_rt.h
---- linux-4.1.26.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/rwlock_types_rt.h 2016-06-19 15:30:58.675296887 +0200
+diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
+new file mode 100644
+index 000000000000..b13832119591
+--- /dev/null
++++ b/include/linux/rwlock_types_rt.h
@@ -0,0 +1,33 @@
+#ifndef __LINUX_RWLOCK_TYPES_RT_H
+#define __LINUX_RWLOCK_TYPES_RT_H
@@ -12050,9 +13122,10 @@ diff -Nur linux-4.1.26.orig/include/linux/rwlock_types_rt.h linux-4.1.26/include
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+#endif
-diff -Nur linux-4.1.26.orig/include/linux/rwsem.h linux-4.1.26/include/linux/rwsem.h
---- linux-4.1.26.orig/include/linux/rwsem.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/rwsem.h 2016-06-19 15:30:58.675296887 +0200
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index 8f498cdde280..2b2148431f14 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
@@ -18,6 +18,10 @@
#include <linux/osq_lock.h>
#endif
@@ -12064,16 +13137,18 @@ diff -Nur linux-4.1.26.orig/include/linux/rwsem.h linux-4.1.26/include/linux/rws
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-@@ -177,4 +181,6 @@
+@@ -177,4 +181,6 @@ extern void up_read_non_owner(struct rw_semaphore *sem);
# define up_read_non_owner(sem) up_read(sem)
#endif
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* _LINUX_RWSEM_H */
-diff -Nur linux-4.1.26.orig/include/linux/rwsem_rt.h linux-4.1.26/include/linux/rwsem_rt.h
---- linux-4.1.26.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/rwsem_rt.h 2016-06-19 15:30:58.675296887 +0200
+diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
+new file mode 100644
+index 000000000000..928a05cbf94f
+--- /dev/null
++++ b/include/linux/rwsem_rt.h
@@ -0,0 +1,140 @@
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
@@ -12215,10 +13290,11 @@ diff -Nur linux-4.1.26.orig/include/linux/rwsem_rt.h linux-4.1.26/include/linux/
+}
+#endif
+#endif
-diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sched.h
---- linux-4.1.26.orig/include/linux/sched.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/sched.h 2016-06-19 15:30:58.675296887 +0200
-@@ -26,6 +26,7 @@
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 9e39deaeddd6..769f2cf30963 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -26,6 +26,7 @@ struct sched_param {
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt_mask.h>
@@ -12226,7 +13302,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
#include <asm/page.h>
#include <asm/ptrace.h>
-@@ -175,8 +176,6 @@
+@@ -175,8 +176,6 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);
@@ -12235,7 +13311,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
extern void dump_cpu_task(int cpu);
struct seq_file;
-@@ -234,10 +233,7 @@
+@@ -234,10 +233,7 @@ extern char ___assert_task_state[1 - 2*!!(
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
@@ -12246,7 +13322,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0)
-@@ -302,6 +298,11 @@
+@@ -302,6 +298,11 @@ extern char ___assert_task_state[1 - 2*!!(
#endif
@@ -12258,7 +13334,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
/* Task command name length */
#define TASK_COMM_LEN 16
-@@ -901,6 +902,50 @@
+@@ -902,6 +903,50 @@ enum cpu_idle_type {
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
/*
@@ -12309,7 +13385,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
* sched-domains (multiprocessor balancing) declarations:
*/
#ifdef CONFIG_SMP
-@@ -1292,6 +1337,7 @@
+@@ -1293,6 +1338,7 @@ enum perf_event_task_context {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -12317,7 +13393,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -1328,6 +1374,12 @@
+@@ -1329,6 +1375,12 @@ struct task_struct {
#endif
unsigned int policy;
@@ -12330,7 +13406,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1435,7 +1487,8 @@
+@@ -1436,7 +1488,8 @@ struct task_struct {
struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -12340,7 +13416,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
unsigned long long vtime_snap;
enum {
VTIME_SLEEPING = 0,
-@@ -1451,6 +1504,9 @@
+@@ -1452,6 +1505,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -12350,7 +13426,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
/* process credentials */
const struct cred __rcu *real_cred; /* objective and real subjective task
-@@ -1483,10 +1539,15 @@
+@@ -1484,10 +1540,15 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
@@ -12366,7 +13442,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
unsigned long sas_ss_sp;
size_t sas_ss_size;
-@@ -1512,6 +1573,8 @@
+@@ -1513,6 +1574,8 @@ struct task_struct {
/* Protection of the PI data structures: */
raw_spinlock_t pi_lock;
@@ -12375,7 +13451,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
struct rb_root pi_waiters;
-@@ -1706,6 +1769,12 @@
+@@ -1707,6 +1770,12 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
@@ -12388,7 +13464,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG
struct memcg_oom_info {
-@@ -1722,14 +1791,26 @@
+@@ -1723,14 +1792,26 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -12418,7 +13494,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
-@@ -1918,6 +1999,15 @@
+@@ -1919,6 +2000,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -12434,7 +13510,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -1925,6 +2015,7 @@
+@@ -1926,6 +2016,7 @@ static inline void put_task_struct(struct task_struct *t)
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
@@ -12442,7 +13518,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
-@@ -1963,6 +2054,7 @@
+@@ -1964,6 +2055,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
@@ -12450,7 +13526,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
-@@ -2127,6 +2219,10 @@
+@@ -2128,6 +2220,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -12461,7 +13537,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -2139,6 +2235,9 @@
+@@ -2140,6 +2236,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
@@ -12471,7 +13547,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
#endif
#ifdef CONFIG_NO_HZ_COMMON
-@@ -2355,6 +2454,7 @@
+@@ -2356,6 +2455,7 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -12479,7 +13555,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
-@@ -2471,12 +2571,24 @@
+@@ -2472,12 +2572,24 @@ extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
@@ -12504,7 +13580,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
-@@ -2788,6 +2900,43 @@
+@@ -2789,6 +2901,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -12548,7 +13624,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
-@@ -2819,6 +2968,51 @@
+@@ -2820,6 +2969,51 @@ static inline int signal_pending_state(long state, struct task_struct *p)
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
@@ -12600,7 +13676,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
-@@ -2840,12 +3034,16 @@
+@@ -2841,12 +3035,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
__cond_resched_lock(lock); \
})
@@ -12617,7 +13693,7 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
static inline void cond_resched_rcu(void)
{
-@@ -3012,6 +3210,26 @@
+@@ -3013,6 +3211,26 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
@@ -12644,10 +13720,11 @@ diff -Nur linux-4.1.26.orig/include/linux/sched.h linux-4.1.26/include/linux/sch
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
-diff -Nur linux-4.1.26.orig/include/linux/seqlock.h linux-4.1.26/include/linux/seqlock.h
---- linux-4.1.26.orig/include/linux/seqlock.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/seqlock.h 2016-06-19 15:30:58.675296887 +0200
-@@ -219,20 +219,30 @@
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index c07e3a536099..381bf3999617 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -219,20 +219,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
return __read_seqcount_retry(s, start);
}
@@ -12682,7 +13759,7 @@ diff -Nur linux-4.1.26.orig/include/linux/seqlock.h linux-4.1.26/include/linux/s
/*
* raw_write_seqcount_latch - redirect readers to even/odd copy
* @s: pointer to seqcount_t
-@@ -305,10 +315,32 @@
+@@ -305,10 +315,32 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
@@ -12715,7 +13792,7 @@ diff -Nur linux-4.1.26.orig/include/linux/seqlock.h linux-4.1.26/include/linux/s
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
-@@ -323,36 +355,36 @@
+@@ -323,36 +355,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
@@ -12758,7 +13835,7 @@ diff -Nur linux-4.1.26.orig/include/linux/seqlock.h linux-4.1.26/include/linux/s
spin_unlock_irq(&sl->lock);
}
-@@ -361,7 +393,7 @@
+@@ -361,7 +393,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
@@ -12767,7 +13844,7 @@ diff -Nur linux-4.1.26.orig/include/linux/seqlock.h linux-4.1.26/include/linux/s
return flags;
}
-@@ -371,7 +403,7 @@
+@@ -371,7 +403,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
@@ -12776,10 +13853,11 @@ diff -Nur linux-4.1.26.orig/include/linux/seqlock.h linux-4.1.26/include/linux/s
spin_unlock_irqrestore(&sl->lock, flags);
}
-diff -Nur linux-4.1.26.orig/include/linux/signal.h linux-4.1.26/include/linux/signal.h
---- linux-4.1.26.orig/include/linux/signal.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/signal.h 2016-06-19 15:30:58.675296887 +0200
-@@ -233,6 +233,7 @@
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index 883ceb1439fa..6da98d067bad 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -233,6 +233,7 @@ static inline void init_sigpending(struct sigpending *sig)
}
extern void flush_sigqueue(struct sigpending *queue);
@@ -12787,10 +13865,11 @@ diff -Nur linux-4.1.26.orig/include/linux/signal.h linux-4.1.26/include/linux/si
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
-diff -Nur linux-4.1.26.orig/include/linux/skbuff.h linux-4.1.26/include/linux/skbuff.h
---- linux-4.1.26.orig/include/linux/skbuff.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/skbuff.h 2016-06-19 15:30:58.675296887 +0200
-@@ -187,6 +187,7 @@
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index ca2e26a486ee..ea41a11d3bc7 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -187,6 +187,7 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
@@ -12798,7 +13877,7 @@ diff -Nur linux-4.1.26.orig/include/linux/skbuff.h linux-4.1.26/include/linux/sk
};
struct sk_buff;
-@@ -1337,6 +1338,12 @@
+@@ -1337,6 +1338,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
__skb_queue_head_init(list);
}
@@ -12811,10 +13890,11 @@ diff -Nur linux-4.1.26.orig/include/linux/skbuff.h linux-4.1.26/include/linux/sk
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
-diff -Nur linux-4.1.26.orig/include/linux/smp.h linux-4.1.26/include/linux/smp.h
---- linux-4.1.26.orig/include/linux/smp.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/smp.h 2016-06-19 15:30:58.675296887 +0200
-@@ -185,6 +185,9 @@
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index c4414074bd88..e6ab36aeaaab 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -185,6 +185,9 @@ static inline void smp_init(void) { }
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
@@ -12824,23 +13904,11 @@ diff -Nur linux-4.1.26.orig/include/linux/smp.h linux-4.1.26/include/linux/smp.h
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
-diff -Nur linux-4.1.26.orig/include/linux/spinlock_api_smp.h linux-4.1.26/include/linux/spinlock_api_smp.h
---- linux-4.1.26.orig/include/linux/spinlock_api_smp.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/spinlock_api_smp.h 2016-06-19 15:30:58.679297041 +0200
-@@ -189,6 +189,8 @@
- return 0;
- }
-
--#include <linux/rwlock_api_smp.h>
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# include <linux/rwlock_api_smp.h>
-+#endif
-
- #endif /* __LINUX_SPINLOCK_API_SMP_H */
-diff -Nur linux-4.1.26.orig/include/linux/spinlock.h linux-4.1.26/include/linux/spinlock.h
---- linux-4.1.26.orig/include/linux/spinlock.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/spinlock.h 2016-06-19 15:30:58.679297041 +0200
-@@ -281,7 +281,11 @@
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 3e18379dfa6f..28f4366fd495 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -281,7 +281,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
@@ -12853,7 +13921,7 @@ diff -Nur linux-4.1.26.orig/include/linux/spinlock.h linux-4.1.26/include/linux/
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
-@@ -292,6 +296,10 @@
+@@ -292,6 +296,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif
@@ -12864,16 +13932,32 @@ diff -Nur linux-4.1.26.orig/include/linux/spinlock.h linux-4.1.26/include/linux/
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
-@@ -426,4 +434,6 @@
+@@ -426,4 +434,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* __LINUX_SPINLOCK_H */
-diff -Nur linux-4.1.26.orig/include/linux/spinlock_rt.h linux-4.1.26/include/linux/spinlock_rt.h
---- linux-4.1.26.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/spinlock_rt.h 2016-06-19 15:30:58.679297041 +0200
+diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
+index 5344268e6e62..043263f30e81 100644
+--- a/include/linux/spinlock_api_smp.h
++++ b/include/linux/spinlock_api_smp.h
+@@ -189,6 +189,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ return 0;
+ }
+
+-#include <linux/rwlock_api_smp.h>
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_api_smp.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
+new file mode 100644
+index 000000000000..f757096b230c
+--- /dev/null
++++ b/include/linux/spinlock_rt.h
@@ -0,0 +1,174 @@
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
@@ -13049,9 +14133,10 @@ diff -Nur linux-4.1.26.orig/include/linux/spinlock_rt.h linux-4.1.26/include/lin
+ atomic_dec_and_spin_lock(atomic, lock)
+
+#endif
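/*
 * Usage sketch (editor's illustration, not part of the patch): under
 * CONFIG_PREEMPT_RT_FULL, spinlock_rt.h turns spinlock_t into an
 * rt_mutex-backed sleeping lock, while raw_spinlock_t (see
 * spinlock_types_raw.h below) keeps the genuinely atomic, spinning
 * behaviour for code that can never sleep. Callers keep the same
 * calls; the names below are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/io.h>

static LIST_HEAD(fifo);
static DEFINE_SPINLOCK(fifo_lock);	/* sleeping lock on RT */
static DEFINE_RAW_SPINLOCK(hw_lock);	/* still disables preemption on RT */

static void fifo_add(struct list_head *item)
{
	spin_lock(&fifo_lock);		/* may block on RT: not for hard irq context */
	list_add_tail(item, &fifo);
	spin_unlock(&fifo_lock);
}

static void hw_poke(void __iomem *reg, u32 val)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_lock, flags);	/* truly atomic section */
	writel(val, reg);
	raw_spin_unlock_irqrestore(&hw_lock, flags);
}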
-diff -Nur linux-4.1.26.orig/include/linux/spinlock_types.h linux-4.1.26/include/linux/spinlock_types.h
---- linux-4.1.26.orig/include/linux/spinlock_types.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/spinlock_types.h 2016-06-19 15:30:58.679297041 +0200
+diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
+index 73548eb13a5d..10bac715ea96 100644
+--- a/include/linux/spinlock_types.h
++++ b/include/linux/spinlock_types.h
@@ -9,80 +9,15 @@
* Released under the General Public License (GPL).
*/
@@ -13140,9 +14225,11 @@ diff -Nur linux-4.1.26.orig/include/linux/spinlock_types.h linux-4.1.26/include/
-#include <linux/rwlock_types.h>
-
#endif /* __LINUX_SPINLOCK_TYPES_H */
-diff -Nur linux-4.1.26.orig/include/linux/spinlock_types_nort.h linux-4.1.26/include/linux/spinlock_types_nort.h
---- linux-4.1.26.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/spinlock_types_nort.h 2016-06-19 15:30:58.679297041 +0200
+diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
+new file mode 100644
+index 000000000000..f1dac1fb1d6a
+--- /dev/null
++++ b/include/linux/spinlock_types_nort.h
@@ -0,0 +1,33 @@
+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
+#define __LINUX_SPINLOCK_TYPES_NORT_H
@@ -13177,9 +14264,11 @@ diff -Nur linux-4.1.26.orig/include/linux/spinlock_types_nort.h linux-4.1.26/inc
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#endif
-diff -Nur linux-4.1.26.orig/include/linux/spinlock_types_raw.h linux-4.1.26/include/linux/spinlock_types_raw.h
---- linux-4.1.26.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/spinlock_types_raw.h 2016-06-19 15:30:58.679297041 +0200
+diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
+new file mode 100644
+index 000000000000..edffc4d53fc9
+--- /dev/null
++++ b/include/linux/spinlock_types_raw.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
@@ -13237,9 +14326,11 @@ diff -Nur linux-4.1.26.orig/include/linux/spinlock_types_raw.h linux-4.1.26/incl
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif
-diff -Nur linux-4.1.26.orig/include/linux/spinlock_types_rt.h linux-4.1.26/include/linux/spinlock_types_rt.h
---- linux-4.1.26.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/spinlock_types_rt.h 2016-06-19 15:30:58.679297041 +0200
+diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
+new file mode 100644
+index 000000000000..9fd431967abc
+--- /dev/null
++++ b/include/linux/spinlock_types_rt.h
@@ -0,0 +1,51 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
@@ -13292,10 +14383,11 @@ diff -Nur linux-4.1.26.orig/include/linux/spinlock_types_rt.h linux-4.1.26/inclu
+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
+
+#endif
-diff -Nur linux-4.1.26.orig/include/linux/srcu.h linux-4.1.26/include/linux/srcu.h
---- linux-4.1.26.orig/include/linux/srcu.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/srcu.h 2016-06-19 15:30:58.679297041 +0200
-@@ -84,10 +84,10 @@
+diff --git a/include/linux/srcu.h b/include/linux/srcu.h
+index bdeb4567b71e..a9c3c49cda5d 100644
+--- a/include/linux/srcu.h
++++ b/include/linux/srcu.h
+@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp);
void process_srcu(struct work_struct *work);
@@ -13308,7 +14400,7 @@ diff -Nur linux-4.1.26.orig/include/linux/srcu.h linux-4.1.26/include/linux/srcu
.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
.running = false, \
.batch_queue = RCU_BATCH_INIT(name.batch_queue), \
-@@ -104,7 +104,7 @@
+@@ -104,7 +104,7 @@ void process_srcu(struct work_struct *work);
*/
#define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
@@ -13317,9 +14409,10 @@ diff -Nur linux-4.1.26.orig/include/linux/srcu.h linux-4.1.26/include/linux/srcu
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
-diff -Nur linux-4.1.26.orig/include/linux/swap.h linux-4.1.26/include/linux/swap.h
---- linux-4.1.26.orig/include/linux/swap.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/swap.h 2016-06-19 15:30:58.679297041 +0200
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index cee108cbe2d5..4c07c12d2d82 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/atomic.h>
@@ -13328,7 +14421,7 @@ diff -Nur linux-4.1.26.orig/include/linux/swap.h linux-4.1.26/include/linux/swap
#include <asm/page.h>
struct notifier_block;
-@@ -252,7 +253,8 @@
+@@ -252,7 +253,8 @@ struct swap_info_struct {
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
@@ -13338,7 +14431,7 @@ diff -Nur linux-4.1.26.orig/include/linux/swap.h linux-4.1.26/include/linux/swap
static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
{
-@@ -296,6 +298,7 @@
+@@ -296,6 +298,7 @@ extern unsigned long nr_free_pagecache_pages(void);
/* linux/mm/swap.c */
@@ -13346,10 +14439,11 @@ diff -Nur linux-4.1.26.orig/include/linux/swap.h linux-4.1.26/include/linux/swap
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
-diff -Nur linux-4.1.26.orig/include/linux/thread_info.h linux-4.1.26/include/linux/thread_info.h
---- linux-4.1.26.orig/include/linux/thread_info.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/thread_info.h 2016-06-19 15:30:58.679297041 +0200
-@@ -102,7 +102,17 @@
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index ff307b548ed3..be9f9dc6a4e1 100644
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -102,7 +102,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
@@ -13368,10 +14462,11 @@ diff -Nur linux-4.1.26.orig/include/linux/thread_info.h linux-4.1.26/include/lin
#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
/*
-diff -Nur linux-4.1.26.orig/include/linux/timer.h linux-4.1.26/include/linux/timer.h
---- linux-4.1.26.orig/include/linux/timer.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/timer.h 2016-06-19 15:30:58.679297041 +0200
-@@ -241,7 +241,7 @@
+diff --git a/include/linux/timer.h b/include/linux/timer.h
+index 8c5a197e1587..5fcd72c57ebe 100644
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
@@ -13380,9 +14475,10 @@ diff -Nur linux-4.1.26.orig/include/linux/timer.h linux-4.1.26/include/linux/tim
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
-diff -Nur linux-4.1.26.orig/include/linux/uaccess.h linux-4.1.26/include/linux/uaccess.h
---- linux-4.1.26.orig/include/linux/uaccess.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/uaccess.h 2016-06-19 15:30:58.679297041 +0200
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index ecd3319dac33..941b2dab50cd 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
@@ -1,21 +1,31 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
@@ -13423,7 +14519,7 @@ diff -Nur linux-4.1.26.orig/include/linux/uaccess.h linux-4.1.26/include/linux/u
/*
* make sure to have issued the store before a pagefault
* can hit.
-@@ -25,18 +35,32 @@
+@@ -25,18 +35,32 @@ static inline void pagefault_disable(void)
static inline void pagefault_enable(void)
{
@@ -13461,9 +14557,10 @@ diff -Nur linux-4.1.26.orig/include/linux/uaccess.h linux-4.1.26/include/linux/u
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
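[Annotation, not part of patch-realtime: the uaccess.h hunks move pagefault_disable()/pagefault_enable() from a bare preempt-count tweak to a per-task counter, so RT can keep preemption enabled across such sections. The caller-visible contract stays the same; a hedged sketch of that contract:]

	#include <linux/uaccess.h>

	/* Inside a pagefault-disabled section a user access must not sleep:
	 * a missing page makes the copy fail instead of being faulted in. */
	static int peek_user_word(const unsigned long __user *uaddr,
				  unsigned long *val)
	{
		unsigned long ret;

		pagefault_disable();
		ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();

		return ret ? -EFAULT : 0;
	}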
-diff -Nur linux-4.1.26.orig/include/linux/uprobes.h linux-4.1.26/include/linux/uprobes.h
---- linux-4.1.26.orig/include/linux/uprobes.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/uprobes.h 2016-06-19 15:30:58.679297041 +0200
+diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
+index 60beb5dc7977..f5a644c649b4 100644
+--- a/include/linux/uprobes.h
++++ b/include/linux/uprobes.h
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
@@ -13472,10 +14569,11 @@ diff -Nur linux-4.1.26.orig/include/linux/uprobes.h linux-4.1.26/include/linux/u
struct vm_area_struct;
struct mm_struct;
-diff -Nur linux-4.1.26.orig/include/linux/vmstat.h linux-4.1.26/include/linux/vmstat.h
---- linux-4.1.26.orig/include/linux/vmstat.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/vmstat.h 2016-06-19 15:30:58.679297041 +0200
-@@ -33,7 +33,9 @@
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index 82e7db7f7100..3feaf770a8bd 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
*/
static inline void __count_vm_event(enum vm_event_item item)
{
@@ -13485,7 +14583,7 @@ diff -Nur linux-4.1.26.orig/include/linux/vmstat.h linux-4.1.26/include/linux/vm
}
static inline void count_vm_event(enum vm_event_item item)
-@@ -43,7 +45,9 @@
+@@ -43,7 +45,9 @@ static inline void count_vm_event(enum vm_event_item item)
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
@@ -13495,20 +14593,11 @@ diff -Nur linux-4.1.26.orig/include/linux/vmstat.h linux-4.1.26/include/linux/vm
}
static inline void count_vm_events(enum vm_event_item item, long delta)
-diff -Nur linux-4.1.26.orig/include/linux/wait.h linux-4.1.26/include/linux/wait.h
---- linux-4.1.26.orig/include/linux/wait.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/linux/wait.h 2016-06-19 15:30:58.679297041 +0200
-@@ -8,6 +8,7 @@
- #include <linux/spinlock.h>
- #include <asm/current.h>
- #include <uapi/linux/wait.h>
-+#include <linux/atomic.h>
-
- typedef struct __wait_queue wait_queue_t;
- typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
-diff -Nur linux-4.1.26.orig/include/linux/wait-simple.h linux-4.1.26/include/linux/wait-simple.h
---- linux-4.1.26.orig/include/linux/wait-simple.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/wait-simple.h 2016-06-19 15:30:58.679297041 +0200
+diff --git a/include/linux/wait-simple.h b/include/linux/wait-simple.h
+new file mode 100644
+index 000000000000..f86bca2c41d5
+--- /dev/null
++++ b/include/linux/wait-simple.h
@@ -0,0 +1,207 @@
+#ifndef _LINUX_WAIT_SIMPLE_H
+#define _LINUX_WAIT_SIMPLE_H
@@ -13717,9 +14806,23 @@ diff -Nur linux-4.1.26.orig/include/linux/wait-simple.h linux-4.1.26/include/lin
+})
+
+#endif
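[Annotation, not part of patch-realtime: the body of the new wait-simple.h is elided by the hunk header above, so only the guard lines are visible. Conceptually, a "simple" waitqueue is a raw lock plus a plain list, with no per-waiter wake callbacks, so each wakeup is O(1) work under the lock and remains usable from hard-IRQ context on RT. A structural sketch with hypothetical names (the real ones live in the elided lines):]

	struct demo_swait_head {
		raw_spinlock_t		lock;		/* stays a true spinlock on RT */
		struct list_head	task_list;	/* FIFO of plain waiters */
	};

	/* Contrast: the classic wait_queue_t carries a wait_queue_func_t
	 * callback of arbitrary cost, which RT wants out of wakeup paths. */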
-diff -Nur linux-4.1.26.orig/include/linux/work-simple.h linux-4.1.26/include/linux/work-simple.h
---- linux-4.1.26.orig/include/linux/work-simple.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/linux/work-simple.h 2016-06-19 15:30:58.679297041 +0200
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 2db83349865b..b3b54c26b6a0 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -8,6 +8,7 @@
+ #include <linux/spinlock.h>
+ #include <asm/current.h>
+ #include <uapi/linux/wait.h>
++#include <linux/atomic.h>
+
+ typedef struct __wait_queue wait_queue_t;
+ typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
+diff --git a/include/linux/work-simple.h b/include/linux/work-simple.h
+new file mode 100644
+index 000000000000..f175fa9a6016
+--- /dev/null
++++ b/include/linux/work-simple.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
@@ -13745,10 +14848,11 @@ diff -Nur linux-4.1.26.orig/include/linux/work-simple.h linux-4.1.26/include/lin
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
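[Annotation, not part of patch-realtime: work-simple.h is a new file and only swork_put() plus the header guard survive the elision above. Assuming the usual entry points of this facility (swork_get(), INIT_SWORK(), swork_queue(); treat these names as assumptions), a user defers work from atomic context to the single swork kthread like this:]

	static struct swork_event demo_event;

	static void demo_handler(struct swork_event *ev)
	{
		/* runs in the swork daemon's context and may sleep */
	}

	static int demo_init(void)
	{
		int err = swork_get();		/* bring the worker thread up */

		if (err)
			return err;
		INIT_SWORK(&demo_event, demo_handler);
		return 0;
	}

	static void demo_trigger(void)		/* safe from IRQ context */
	{
		swork_queue(&demo_event);
	}

	static void demo_exit(void)
	{
		swork_put();			/* drop the worker reference */
	}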
-diff -Nur linux-4.1.26.orig/include/net/dst.h linux-4.1.26/include/net/dst.h
---- linux-4.1.26.orig/include/net/dst.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/net/dst.h 2016-06-19 15:30:58.679297041 +0200
-@@ -436,7 +436,7 @@
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 182b812d45e1..74baade721d6 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -436,7 +436,7 @@ static inline void dst_confirm(struct dst_entry *dst)
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
struct sk_buff *skb)
{
@@ -13757,10 +14861,11 @@ diff -Nur linux-4.1.26.orig/include/net/dst.h linux-4.1.26/include/net/dst.h
if (dst->pending_confirm) {
unsigned long now = jiffies;
-diff -Nur linux-4.1.26.orig/include/net/neighbour.h linux-4.1.26/include/net/neighbour.h
---- linux-4.1.26.orig/include/net/neighbour.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/net/neighbour.h 2016-06-19 15:30:58.679297041 +0200
-@@ -445,7 +445,7 @@
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index bd33e66f49aa..9c38018c6038 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -445,7 +445,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
}
#endif
@@ -13769,7 +14874,7 @@ diff -Nur linux-4.1.26.orig/include/net/neighbour.h linux-4.1.26/include/net/nei
{
unsigned int seq;
int hh_len;
-@@ -500,7 +500,7 @@
+@@ -500,7 +500,7 @@ struct neighbour_cb {
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
@@ -13778,10 +14883,11 @@ diff -Nur linux-4.1.26.orig/include/net/neighbour.h linux-4.1.26/include/net/nei
const struct net_device *dev)
{
unsigned int seq;
-diff -Nur linux-4.1.26.orig/include/net/netns/ipv4.h linux-4.1.26/include/net/netns/ipv4.h
---- linux-4.1.26.orig/include/net/netns/ipv4.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/net/netns/ipv4.h 2016-06-19 15:30:58.683297195 +0200
-@@ -69,6 +69,7 @@
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index 614a49be68a9..b4bdbe10b77a 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -69,6 +69,7 @@ struct netns_ipv4 {
int sysctl_icmp_echo_ignore_all;
int sysctl_icmp_echo_ignore_broadcasts;
@@ -13789,9 +14895,11 @@ diff -Nur linux-4.1.26.orig/include/net/netns/ipv4.h linux-4.1.26/include/net/ne
int sysctl_icmp_ignore_bogus_error_responses;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
-diff -Nur linux-4.1.26.orig/include/trace/events/hist.h linux-4.1.26/include/trace/events/hist.h
---- linux-4.1.26.orig/include/trace/events/hist.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/trace/events/hist.h 2016-06-19 15:30:58.683297195 +0200
+diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h
+new file mode 100644
+index 000000000000..37f6eb8c9dc2
+--- /dev/null
++++ b/include/trace/events/hist.h
@@ -0,0 +1,74 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hist
@@ -13867,9 +14975,11 @@ diff -Nur linux-4.1.26.orig/include/trace/events/hist.h linux-4.1.26/include/tra
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
-diff -Nur linux-4.1.26.orig/include/trace/events/latency_hist.h linux-4.1.26/include/trace/events/latency_hist.h
---- linux-4.1.26.orig/include/trace/events/latency_hist.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/include/trace/events/latency_hist.h 2016-06-19 15:30:58.683297195 +0200
+diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h
+new file mode 100644
+index 000000000000..d3f2fbd560b1
+--- /dev/null
++++ b/include/trace/events/latency_hist.h
@@ -0,0 +1,29 @@
+#ifndef _LATENCY_HIST_H
+#define _LATENCY_HIST_H
@@ -13900,10 +15010,11 @@ diff -Nur linux-4.1.26.orig/include/trace/events/latency_hist.h linux-4.1.26/inc
+}
+
+#endif /* _LATENCY_HIST_H */
-diff -Nur linux-4.1.26.orig/include/trace/events/sched.h linux-4.1.26/include/trace/events/sched.h
---- linux-4.1.26.orig/include/trace/events/sched.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/include/trace/events/sched.h 2016-06-19 15:30:58.683297195 +0200
-@@ -55,9 +55,9 @@
+diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
+index 30fedaf3e56a..3b63828390a6 100644
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -55,9 +55,9 @@ TRACE_EVENT(sched_kthread_stop_ret,
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,
@@ -13915,7 +15026,7 @@ diff -Nur linux-4.1.26.orig/include/trace/events/sched.h linux-4.1.26/include/tr
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
-@@ -71,25 +71,37 @@
+@@ -71,25 +71,37 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
@@ -13960,10 +15071,11 @@ diff -Nur linux-4.1.26.orig/include/trace/events/sched.h linux-4.1.26/include/tr
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
-diff -Nur linux-4.1.26.orig/init/Kconfig linux-4.1.26/init/Kconfig
---- linux-4.1.26.orig/init/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/init/Kconfig 2016-06-19 15:30:58.683297195 +0200
-@@ -637,7 +637,7 @@
+diff --git a/init/Kconfig b/init/Kconfig
+index dc24dec60232..a70b5002df06 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -637,7 +637,7 @@ config RCU_FANOUT_EXACT
config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
@@ -13972,7 +15084,7 @@ diff -Nur linux-4.1.26.orig/init/Kconfig linux-4.1.26/init/Kconfig
default n
help
This option permits CPUs to enter dynticks-idle state even if
-@@ -664,7 +664,7 @@
+@@ -664,7 +664,7 @@ config TREE_RCU_TRACE
config RCU_BOOST
bool "Enable RCU priority boosting"
depends on RT_MUTEXES && PREEMPT_RCU
@@ -13981,7 +15093,7 @@ diff -Nur linux-4.1.26.orig/init/Kconfig linux-4.1.26/init/Kconfig
help
This option boosts the priority of preempted RCU readers that
block the current preemptible RCU grace period for too long.
-@@ -1101,6 +1101,7 @@
+@@ -1101,6 +1101,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
@@ -13989,7 +15101,7 @@ diff -Nur linux-4.1.26.orig/init/Kconfig linux-4.1.26/init/Kconfig
default n
help
This feature lets you explicitly allocate real CPU bandwidth
-@@ -1688,6 +1689,7 @@
+@@ -1688,6 +1689,7 @@ choice
config SLAB
bool "SLAB"
@@ -13997,7 +15109,7 @@ diff -Nur linux-4.1.26.orig/init/Kconfig linux-4.1.26/init/Kconfig
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache hot objects in
-@@ -1706,6 +1708,7 @@
+@@ -1706,6 +1708,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
@@ -14005,7 +15117,7 @@ diff -Nur linux-4.1.26.orig/init/Kconfig linux-4.1.26/init/Kconfig
help
SLOB replaces the stock allocator with a drastically simpler
allocator. SLOB is generally more space efficient but
-@@ -1715,7 +1718,7 @@
+@@ -1715,7 +1718,7 @@ endchoice
config SLUB_CPU_PARTIAL
default y
@@ -14014,10 +15126,21 @@ diff -Nur linux-4.1.26.orig/init/Kconfig linux-4.1.26/init/Kconfig
bool "SLUB per cpu partial cache"
help
Per cpu partial caches accellerate objects allocation and freeing
-diff -Nur linux-4.1.26.orig/init/main.c linux-4.1.26/init/main.c
---- linux-4.1.26.orig/init/main.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/init/main.c 2016-06-19 15:30:58.683297195 +0200
-@@ -525,6 +525,7 @@
+diff --git a/init/Makefile b/init/Makefile
+index 7bc47ee31c36..88cf473554e0 100644
+--- a/init/Makefile
++++ b/init/Makefile
+@@ -33,4 +33,4 @@ silent_chk_compile.h = :
+ include/generated/compile.h: FORCE
+ @$($(quiet)chk_compile.h)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
+diff --git a/init/main.c b/init/main.c
+index 2a89545e0a5d..0486a8e11fc0 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -525,6 +525,7 @@ asmlinkage __visible void __init start_kernel(void)
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
@@ -14025,18 +15148,10 @@ diff -Nur linux-4.1.26.orig/init/main.c linux-4.1.26/init/main.c
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
build_all_zonelists(NULL, NULL);
-diff -Nur linux-4.1.26.orig/init/Makefile linux-4.1.26/init/Makefile
---- linux-4.1.26.orig/init/Makefile 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/init/Makefile 2016-06-19 15:30:58.683297195 +0200
-@@ -33,4 +33,4 @@
- include/generated/compile.h: FORCE
- @$($(quiet)chk_compile.h)
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
-- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
-+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
-diff -Nur linux-4.1.26.orig/ipc/mqueue.c linux-4.1.26/ipc/mqueue.c
---- linux-4.1.26.orig/ipc/mqueue.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/ipc/mqueue.c 2016-06-19 15:30:58.683297195 +0200
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index c3fc5c2b63f3..161a1807e6ef 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
@@ -47,8 +47,7 @@
#define RECV 1
@@ -14047,7 +15162,7 @@ diff -Nur linux-4.1.26.orig/ipc/mqueue.c linux-4.1.26/ipc/mqueue.c
struct posix_msg_tree_node {
struct rb_node rb_node;
-@@ -568,15 +567,12 @@
+@@ -568,15 +567,12 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr,
wq_add(info, sr, ewp);
for (;;) {
@@ -14064,7 +15179,7 @@ diff -Nur linux-4.1.26.orig/ipc/mqueue.c linux-4.1.26/ipc/mqueue.c
if (ewp->state == STATE_READY) {
retval = 0;
goto out;
-@@ -904,11 +900,15 @@
+@@ -904,11 +900,15 @@ out_name:
* list of waiting receivers. A sender checks that list before adding the new
* message into the message array. If there is a waiting receiver, then it
* bypasses the message array and directly hands the message over to the
@@ -14085,7 +15200,7 @@ diff -Nur linux-4.1.26.orig/ipc/mqueue.c linux-4.1.26/ipc/mqueue.c
*
* The same algorithm is used for senders.
*/
-@@ -916,21 +916,29 @@
+@@ -916,21 +916,29 @@ out_name:
/* pipelined_send() - send a message directly to the task waiting in
* sys_mq_timedreceive() (without inserting message into a queue).
*/
@@ -14120,7 +15235,7 @@ diff -Nur linux-4.1.26.orig/ipc/mqueue.c linux-4.1.26/ipc/mqueue.c
{
struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
-@@ -941,10 +949,9 @@
+@@ -941,10 +949,9 @@ static inline void pipelined_receive(struct mqueue_inode_info *info)
}
if (msg_insert(sender->msg, info))
return;
@@ -14133,7 +15248,7 @@ diff -Nur linux-4.1.26.orig/ipc/mqueue.c linux-4.1.26/ipc/mqueue.c
sender->state = STATE_READY;
}
-@@ -962,6 +969,7 @@
+@@ -962,6 +969,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
struct timespec ts;
struct posix_msg_tree_node *new_leaf = NULL;
int ret = 0;
@@ -14141,7 +15256,7 @@ diff -Nur linux-4.1.26.orig/ipc/mqueue.c linux-4.1.26/ipc/mqueue.c
if (u_abs_timeout) {
int res = prepare_timeout(u_abs_timeout, &expires, &ts);
-@@ -1045,7 +1053,7 @@
+@@ -1045,7 +1053,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
} else {
receiver = wq_get_first_waiter(info, RECV);
if (receiver) {
@@ -14150,7 +15265,7 @@ diff -Nur linux-4.1.26.orig/ipc/mqueue.c linux-4.1.26/ipc/mqueue.c
} else {
/* adds message to the queue */
ret = msg_insert(msg_ptr, info);
-@@ -1058,6 +1066,7 @@
+@@ -1058,6 +1066,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
}
out_unlock:
spin_unlock(&info->lock);
@@ -14158,7 +15273,7 @@ diff -Nur linux-4.1.26.orig/ipc/mqueue.c linux-4.1.26/ipc/mqueue.c
out_free:
if (ret)
free_msg(msg_ptr);
-@@ -1144,14 +1153,17 @@
+@@ -1144,14 +1153,17 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
msg_ptr = wait.msg;
}
} else {
@@ -14177,10 +15292,11 @@ diff -Nur linux-4.1.26.orig/ipc/mqueue.c linux-4.1.26/ipc/mqueue.c
ret = 0;
}
if (ret == 0) {
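[Annotation, not part of patch-realtime: the mqueue hunks rework the pipelined send/receive handoff so a waiter is woken only after info->lock has been dropped; waking while holding a lock that sleeps on RT invites priority inversion. A sketch of the deferred-wakeup shape, assuming the lockless wake-queue helpers this series carries (illustration only):]

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	static void handoff_and_wake(spinlock_t *lock, struct task_struct *waiter)
	{
		WAKE_Q(wake_q);			/* on-stack list of tasks to wake */

		spin_lock(lock);
		/* ... hand the message over, mark the waiter STATE_READY ... */
		wake_q_add(&wake_q, waiter);	/* takes a task reference */
		spin_unlock(lock);

		wake_up_q(&wake_q);		/* real wakeups, no lock held */
	}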
-diff -Nur linux-4.1.26.orig/ipc/msg.c linux-4.1.26/ipc/msg.c
---- linux-4.1.26.orig/ipc/msg.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/ipc/msg.c 2016-06-19 15:30:58.683297195 +0200
-@@ -188,6 +188,12 @@
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 3b2b0f5149ab..a75e79ff05ee 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -188,6 +188,12 @@ static void expunge_all(struct msg_queue *msq, int res)
struct msg_receiver *msr, *t;
list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
@@ -14193,7 +15309,7 @@ diff -Nur linux-4.1.26.orig/ipc/msg.c linux-4.1.26/ipc/msg.c
msr->r_msg = NULL; /* initialize expunge ordering */
wake_up_process(msr->r_tsk);
/*
-@@ -198,6 +204,8 @@
+@@ -198,6 +204,8 @@ static void expunge_all(struct msg_queue *msq, int res)
*/
smp_mb();
msr->r_msg = ERR_PTR(res);
@@ -14202,7 +15318,7 @@ diff -Nur linux-4.1.26.orig/ipc/msg.c linux-4.1.26/ipc/msg.c
}
}
-@@ -574,6 +582,11 @@
+@@ -574,6 +582,11 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
!security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
msr->r_msgtype, msr->r_mode)) {
@@ -14214,7 +15330,7 @@ diff -Nur linux-4.1.26.orig/ipc/msg.c linux-4.1.26/ipc/msg.c
list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) {
-@@ -595,12 +608,13 @@
+@@ -595,12 +608,13 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
*/
smp_mb();
msr->r_msg = msg;
@@ -14229,10 +15345,11 @@ diff -Nur linux-4.1.26.orig/ipc/msg.c linux-4.1.26/ipc/msg.c
return 0;
}
-diff -Nur linux-4.1.26.orig/ipc/sem.c linux-4.1.26/ipc/sem.c
---- linux-4.1.26.orig/ipc/sem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/ipc/sem.c 2016-06-19 15:30:58.683297195 +0200
-@@ -690,6 +690,13 @@
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 534caee6bf33..fbfdb0b699e0 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -690,6 +690,13 @@ undo:
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
@@ -14246,7 +15363,7 @@ diff -Nur linux-4.1.26.orig/ipc/sem.c linux-4.1.26/ipc/sem.c
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
-@@ -701,6 +708,7 @@
+@@ -701,6 +708,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
q->pid = error;
list_add_tail(&q->list, pt);
@@ -14254,7 +15371,7 @@ diff -Nur linux-4.1.26.orig/ipc/sem.c linux-4.1.26/ipc/sem.c
}
/**
-@@ -714,6 +722,7 @@
+@@ -714,6 +722,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
*/
static void wake_up_sem_queue_do(struct list_head *pt)
{
@@ -14262,7 +15379,7 @@ diff -Nur linux-4.1.26.orig/ipc/sem.c linux-4.1.26/ipc/sem.c
struct sem_queue *q, *t;
int did_something;
-@@ -726,6 +735,7 @@
+@@ -726,6 +735,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
}
if (did_something)
preempt_enable();
@@ -14270,9 +15387,84 @@ diff -Nur linux-4.1.26.orig/ipc/sem.c linux-4.1.26/ipc/sem.c
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
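[Annotation, not part of patch-realtime: for ipc/sem.c the problem is the mainline batching idiom, in which wakeups are collected on a list and fired with preemption disabled so a woken task cannot free its sem_queue underneath the waker. RT cannot keep preemption off across an arbitrary number of wakeups, hence the direct per-queue wake in the added lines above. The idiom being avoided, simplified:]

	static void wake_batch(struct list_head *pending)
	{
		struct sem_queue *q, *t;

		preempt_disable();	/* unbounded preempt-off: bad on RT */
		list_for_each_entry_safe(q, t, pending, list) {
			list_del(&q->list);
			wake_up_process(q->sleeper);
		}
		preempt_enable();
	}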
-diff -Nur linux-4.1.26.orig/kernel/bpf/hashtab.c linux-4.1.26/kernel/bpf/hashtab.c
---- linux-4.1.26.orig/kernel/bpf/hashtab.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/bpf/hashtab.c 2016-06-19 15:30:58.683297195 +0200
+diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
+index 08561f1acd13..c61e9131ecec 100644
+--- a/kernel/Kconfig.locks
++++ b/kernel/Kconfig.locks
+@@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW
+
+ config MUTEX_SPIN_ON_OWNER
+ def_bool y
+- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+
+ config RWSEM_SPIN_ON_OWNER
+ def_bool y
+- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+
+ config LOCK_SPIN_ON_OWNER
+ def_bool y
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index 3f9c97419f02..11dbe26a8279 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -1,3 +1,16 @@
++config PREEMPT
++ bool
++ select PREEMPT_COUNT
++
++config PREEMPT_RT_BASE
++ bool
++ select PREEMPT
++
++config HAVE_PREEMPT_LAZY
++ bool
++
++config PREEMPT_LAZY
++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
+
+ choice
+ prompt "Preemption Model"
+@@ -33,9 +46,9 @@ config PREEMPT_VOLUNTARY
+
+ Select this if you are building a kernel for a desktop system.
+
+-config PREEMPT
++config PREEMPT__LL
+ bool "Preemptible Kernel (Low-Latency Desktop)"
+- select PREEMPT_COUNT
++ select PREEMPT
+ select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
+ help
+ This option reduces the latency of the kernel by making
+@@ -52,6 +65,22 @@ config PREEMPT
+ embedded system with latency requirements in the milliseconds
+ range.
+
++config PREEMPT_RTB
++ bool "Preemptible Kernel (Basic RT)"
++ select PREEMPT_RT_BASE
++ help
++ This option is basically the same as (Low-Latency Desktop) but
++ enables changes which are preliminary for the full preemptible
++ RT kernel.
++
++config PREEMPT_RT_FULL
++ bool "Fully Preemptible Kernel (RT)"
++ depends on IRQ_FORCED_THREADING
++ select PREEMPT_RT_BASE
++ select PREEMPT_RCU
++ help
++ All and everything
++
+ endchoice
+
+ config PREEMPT_COUNT
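[Annotation, not part of patch-realtime: the Kconfig.preempt rewrite demotes PREEMPT to a hidden symbol selected by the visible models (PREEMPT__LL, PREEMPT_RTB, PREEMPT_RT_FULL). C code then distinguishes the tiers roughly like this, as an illustration:]

	#if defined(CONFIG_PREEMPT_RT_FULL)
	/* full RT: spinlock_t/rwlock_t become sleeping rtmutex-based locks */
	#elif defined(CONFIG_PREEMPT_RT_BASE)
	/* basic RT: RT infrastructure without the full lock substitution */
	#elif defined(CONFIG_PREEMPT)
	/* classic low-latency desktop preemption */
	#endif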
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 83c209d9b17a..972b76bf54b7 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
@@ -17,7 +17,7 @@
struct bpf_htab {
struct bpf_map map;
@@ -14282,7 +15474,7 @@ diff -Nur linux-4.1.26.orig/kernel/bpf/hashtab.c linux-4.1.26/kernel/bpf/hashtab
u32 count; /* number of elements in this hashtable */
u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */
-@@ -82,7 +82,7 @@
+@@ -82,7 +82,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
for (i = 0; i < htab->n_buckets; i++)
INIT_HLIST_HEAD(&htab->buckets[i]);
@@ -14291,7 +15483,7 @@ diff -Nur linux-4.1.26.orig/kernel/bpf/hashtab.c linux-4.1.26/kernel/bpf/hashtab
htab->count = 0;
htab->elem_size = sizeof(struct htab_elem) +
-@@ -230,7 +230,7 @@
+@@ -230,7 +230,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
l_new->hash = htab_map_hash(l_new->key, key_size);
/* bpf_map_update_elem() can be called in_irq() */
@@ -14300,7 +15492,7 @@ diff -Nur linux-4.1.26.orig/kernel/bpf/hashtab.c linux-4.1.26/kernel/bpf/hashtab
head = select_bucket(htab, l_new->hash);
-@@ -266,11 +266,11 @@
+@@ -266,11 +266,11 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
} else {
htab->count++;
}
@@ -14314,7 +15506,7 @@ diff -Nur linux-4.1.26.orig/kernel/bpf/hashtab.c linux-4.1.26/kernel/bpf/hashtab
kfree(l_new);
return ret;
}
-@@ -291,7 +291,7 @@
+@@ -291,7 +291,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
hash = htab_map_hash(key, key_size);
@@ -14323,7 +15515,7 @@ diff -Nur linux-4.1.26.orig/kernel/bpf/hashtab.c linux-4.1.26/kernel/bpf/hashtab
head = select_bucket(htab, hash);
-@@ -304,7 +304,7 @@
+@@ -304,7 +304,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
ret = 0;
}
@@ -14332,10 +15524,11 @@ diff -Nur linux-4.1.26.orig/kernel/bpf/hashtab.c linux-4.1.26/kernel/bpf/hashtab
return ret;
}
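[Annotation, not part of patch-realtime: the hashtab hunks are the canonical RT lock conversion. On PREEMPT_RT_FULL a spinlock_t may sleep, but this bucket lock is taken on a path where in_irq() is possible, so it must become a raw_spinlock_t, which always truly spins. The pattern in isolation:]

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);

	static void demo_update(unsigned long *counter)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&demo_lock, flags);
		(*counter)++;	/* keep raw sections short and bounded */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
	}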
-diff -Nur linux-4.1.26.orig/kernel/cgroup.c linux-4.1.26/kernel/cgroup.c
---- linux-4.1.26.orig/kernel/cgroup.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/cgroup.c 2016-06-19 15:30:58.687297349 +0200
-@@ -4422,10 +4422,10 @@
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 3abce1e0f910..d818976f1d62 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -4423,10 +4423,10 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -14348,7 +15541,7 @@ diff -Nur linux-4.1.26.orig/kernel/cgroup.c linux-4.1.26/kernel/cgroup.c
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -4464,8 +4464,8 @@
+@@ -4465,8 +4465,8 @@ static void css_release(struct percpu_ref *ref)
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -14359,7 +15552,7 @@ diff -Nur linux-4.1.26.orig/kernel/cgroup.c linux-4.1.26/kernel/cgroup.c
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5081,6 +5081,7 @@
+@@ -5080,6 +5080,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
@@ -14367,10 +15560,11 @@ diff -Nur linux-4.1.26.orig/kernel/cgroup.c linux-4.1.26/kernel/cgroup.c
/*
* Used to destroy pidlists and separate to serve as flush domain.
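[Annotation, not part of patch-realtime: the cgroup hunks split css_release() so the sleeping part of the teardown runs from cgroup_destroy_wq rather than from the percpu_ref release path, which fires in atomic (RCU/softirq) context. The generic shape of that deferral, as a hedged sketch with hypothetical names:]

	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct demo_obj {
		struct percpu_ref	refcnt;
		struct work_struct	destroy_work;
	};

	static void demo_destroy_workfn(struct work_struct *work)
	{
		struct demo_obj *obj =
			container_of(work, struct demo_obj, destroy_work);

		/* may sleep here: take mutexes, free sub-objects, ... */
		kfree(obj);
	}

	static void demo_release(struct percpu_ref *ref)	/* atomic context */
	{
		struct demo_obj *obj = container_of(ref, struct demo_obj, refcnt);

		INIT_WORK(&obj->destroy_work, demo_destroy_workfn);
		schedule_work(&obj->destroy_work);
	}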
-diff -Nur linux-4.1.26.orig/kernel/cpu.c linux-4.1.26/kernel/cpu.c
---- linux-4.1.26.orig/kernel/cpu.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/cpu.c 2016-06-19 15:30:58.687297349 +0200
-@@ -74,8 +74,8 @@
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 94bbe4695232..0351ac42263e 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -74,8 +74,8 @@ static struct {
#endif
} cpu_hotplug = {
.active_writer = NULL,
@@ -14380,7 +15574,7 @@ diff -Nur linux-4.1.26.orig/kernel/cpu.c linux-4.1.26/kernel/cpu.c
#ifdef CONFIG_DEBUG_LOCK_ALLOC
.dep_map = {.name = "cpu_hotplug.lock" },
#endif
-@@ -88,6 +88,289 @@
+@@ -88,6 +88,289 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -14670,7 +15864,7 @@ diff -Nur linux-4.1.26.orig/kernel/cpu.c linux-4.1.26/kernel/cpu.c
void get_online_cpus(void)
{
-@@ -349,13 +632,15 @@
+@@ -349,13 +632,15 @@ static int __ref take_cpu_down(void *_param)
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
@@ -14687,7 +15881,7 @@ diff -Nur linux-4.1.26.orig/kernel/cpu.c linux-4.1.26/kernel/cpu.c
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -363,7 +648,34 @@
+@@ -363,7 +648,34 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
if (!cpu_online(cpu))
return -EINVAL;
@@ -14722,7 +15916,7 @@ diff -Nur linux-4.1.26.orig/kernel/cpu.c linux-4.1.26/kernel/cpu.c
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
-@@ -389,8 +701,12 @@
+@@ -389,8 +701,12 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
#endif
synchronize_rcu();
@@ -14735,7 +15929,7 @@ diff -Nur linux-4.1.26.orig/kernel/cpu.c linux-4.1.26/kernel/cpu.c
/*
* So now all preempt/rcu users must observe !cpu_active().
*/
-@@ -427,9 +743,14 @@
+@@ -427,9 +743,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
check_for_tasks(cpu);
out_release:
@@ -14750,10 +15944,11 @@ diff -Nur linux-4.1.26.orig/kernel/cpu.c linux-4.1.26/kernel/cpu.c
return err;
}
-diff -Nur linux-4.1.26.orig/kernel/debug/kdb/kdb_io.c linux-4.1.26/kernel/debug/kdb/kdb_io.c
---- linux-4.1.26.orig/kernel/debug/kdb/kdb_io.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/debug/kdb/kdb_io.c 2016-06-19 15:30:58.687297349 +0200
-@@ -554,7 +554,6 @@
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index fc1ef736253c..83c666537a7a 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
int linecount;
int colcount;
int logging, saved_loglevel = 0;
@@ -14761,7 +15956,7 @@ diff -Nur linux-4.1.26.orig/kernel/debug/kdb/kdb_io.c linux-4.1.26/kernel/debug/
int got_printf_lock = 0;
int retlen = 0;
int fnd, len;
-@@ -565,8 +564,6 @@
+@@ -565,8 +564,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
unsigned long uninitialized_var(flags);
preempt_disable();
@@ -14770,7 +15965,7 @@ diff -Nur linux-4.1.26.orig/kernel/debug/kdb/kdb_io.c linux-4.1.26/kernel/debug/
/* Serialize kdb_printf if multiple cpus try to write at once.
* But if any cpu goes recursive in kdb, just print the output,
-@@ -855,7 +852,6 @@
+@@ -855,7 +852,6 @@ kdb_print_out:
} else {
__release(kdb_printf_lock);
}
@@ -14778,7 +15973,7 @@ diff -Nur linux-4.1.26.orig/kernel/debug/kdb/kdb_io.c linux-4.1.26/kernel/debug/
preempt_enable();
return retlen;
}
-@@ -865,9 +861,11 @@
+@@ -865,9 +861,11 @@ int kdb_printf(const char *fmt, ...)
va_list ap;
int r;
@@ -14790,10 +15985,11 @@ diff -Nur linux-4.1.26.orig/kernel/debug/kdb/kdb_io.c linux-4.1.26/kernel/debug/
return r;
}
-diff -Nur linux-4.1.26.orig/kernel/events/core.c linux-4.1.26/kernel/events/core.c
---- linux-4.1.26.orig/kernel/events/core.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/events/core.c 2016-06-19 15:30:58.687297349 +0200
-@@ -6925,6 +6925,7 @@
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 6da64f0d0630..aa35b5850d36 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6925,6 +6925,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
@@ -14801,10 +15997,11 @@ diff -Nur linux-4.1.26.orig/kernel/events/core.c linux-4.1.26/kernel/events/core
/*
* Since hrtimers have a fixed rate, we can do a static freq->period
-diff -Nur linux-4.1.26.orig/kernel/exit.c linux-4.1.26/kernel/exit.c
---- linux-4.1.26.orig/kernel/exit.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/exit.c 2016-06-19 15:30:58.687297349 +0200
-@@ -144,7 +144,7 @@
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 819f51ec4f55..44eb884773e0 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -144,7 +144,7 @@ static void __exit_signal(struct task_struct *tsk)
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
@@ -14813,10 +16010,11 @@ diff -Nur linux-4.1.26.orig/kernel/exit.c linux-4.1.26/kernel/exit.c
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
-diff -Nur linux-4.1.26.orig/kernel/fork.c linux-4.1.26/kernel/fork.c
---- linux-4.1.26.orig/kernel/fork.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/fork.c 2016-06-19 15:30:58.687297349 +0200
-@@ -108,7 +108,7 @@
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 8209fa2d36ef..8f8a0a13d212 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -108,7 +108,7 @@ int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
@@ -14825,7 +16023,7 @@ diff -Nur linux-4.1.26.orig/kernel/fork.c linux-4.1.26/kernel/fork.c
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
-@@ -244,7 +244,9 @@
+@@ -244,7 +244,9 @@ static inline void put_signal_struct(struct signal_struct *sig)
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
@@ -14836,7 +16034,7 @@ diff -Nur linux-4.1.26.orig/kernel/fork.c linux-4.1.26/kernel/fork.c
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
-@@ -260,7 +262,18 @@
+@@ -260,7 +262,18 @@ void __put_task_struct(struct task_struct *tsk)
if (!profile_handoff_task(tsk))
free_task(tsk);
}
@@ -14855,7 +16053,7 @@ diff -Nur linux-4.1.26.orig/kernel/fork.c linux-4.1.26/kernel/fork.c
void __init __weak arch_task_cache_init(void) { }
-@@ -374,6 +387,7 @@
+@@ -374,6 +387,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
#endif
tsk->splice_pipe = NULL;
tsk->task_frag.page = NULL;
@@ -14863,7 +16061,7 @@ diff -Nur linux-4.1.26.orig/kernel/fork.c linux-4.1.26/kernel/fork.c
account_kernel_stack(ti, 1);
-@@ -680,6 +694,19 @@
+@@ -680,6 +694,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -14883,7 +16081,7 @@ diff -Nur linux-4.1.26.orig/kernel/fork.c linux-4.1.26/kernel/fork.c
/*
* Decrement the use count and release all resources for an mm.
*/
-@@ -1214,6 +1241,9 @@
+@@ -1214,6 +1241,9 @@ static void rt_mutex_init_task(struct task_struct *p)
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
@@ -14893,7 +16091,7 @@ diff -Nur linux-4.1.26.orig/kernel/fork.c linux-4.1.26/kernel/fork.c
tsk->cputime_expires.prof_exp = 0;
tsk->cputime_expires.virt_exp = 0;
tsk->cputime_expires.sched_exp = 0;
-@@ -1338,6 +1368,7 @@
+@@ -1338,6 +1368,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
@@ -14901,7 +16099,7 @@ diff -Nur linux-4.1.26.orig/kernel/fork.c linux-4.1.26/kernel/fork.c
p->utime = p->stime = p->gtime = 0;
p->utimescaled = p->stimescaled = 0;
-@@ -1345,7 +1376,8 @@
+@@ -1345,7 +1376,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->prev_cputime.utime = p->prev_cputime.stime = 0;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -14911,7 +16109,7 @@ diff -Nur linux-4.1.26.orig/kernel/fork.c linux-4.1.26/kernel/fork.c
p->vtime_snap = 0;
p->vtime_snap_whence = VTIME_SLEEPING;
#endif
-@@ -1396,6 +1428,9 @@
+@@ -1396,6 +1428,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
@@ -14921,10 +16119,11 @@ diff -Nur linux-4.1.26.orig/kernel/fork.c linux-4.1.26/kernel/fork.c
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
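[Annotation, not part of patch-realtime: the fork.c hunk around __put_task_struct() adds roughly a dozen elided lines; on RT the final put may arrive from atomic context where freeing (which can take sleeping locks) is not allowed, so it is deferred through RCU. A sketch of that deferral; the field and callback names are assumptions:]

	#ifdef CONFIG_PREEMPT_RT_BASE
	void __put_task_struct_cb(struct rcu_head *rhp)
	{
		struct task_struct *tsk =
			container_of(rhp, struct task_struct, put_rcu);

		__put_task_struct(tsk);		/* now in process context */
	}
	#endif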
-diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
---- linux-4.1.26.orig/kernel/futex.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/futex.c 2016-06-19 15:32:42.447298576 +0200
-@@ -738,7 +738,9 @@
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 2214b70f1910..70ba363359a5 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -738,7 +738,9 @@ void exit_pi_state_list(struct task_struct *curr)
* task still owns the PI-state:
*/
if (head->next != next) {
@@ -14934,7 +16133,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
continue;
}
-@@ -1090,9 +1092,11 @@
+@@ -1090,9 +1092,11 @@ static void __unqueue_futex(struct futex_q *q)
/*
* The hash bucket lock must be held when this is called.
@@ -14948,7 +16147,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
{
struct task_struct *p = q->task;
-@@ -1100,14 +1104,10 @@
+@@ -1100,14 +1104,10 @@ static void wake_futex(struct futex_q *q)
return;
/*
@@ -14966,7 +16165,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
__unqueue_futex(q);
/*
* The waiting task can free the futex_q as soon as
-@@ -1117,16 +1117,15 @@
+@@ -1117,16 +1117,15 @@ static void wake_futex(struct futex_q *q)
*/
smp_wmb();
q->lock_ptr = NULL;
@@ -14986,7 +16185,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
int ret = 0;
if (!pi_state)
-@@ -1188,7 +1187,17 @@
+@@ -1188,7 +1187,17 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
raw_spin_unlock_irq(&new_owner->pi_lock);
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
@@ -15005,7 +16204,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
return 0;
}
-@@ -1227,6 +1236,7 @@
+@@ -1227,6 +1236,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
struct futex_q *this, *next;
union futex_key key = FUTEX_KEY_INIT;
int ret;
@@ -15013,7 +16212,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
if (!bitset)
return -EINVAL;
-@@ -1254,13 +1264,14 @@
+@@ -1254,13 +1264,14 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
if (!(this->bitset & bitset))
continue;
@@ -15029,7 +16228,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
out_put_key:
put_futex_key(&key);
out:
-@@ -1279,6 +1290,7 @@
+@@ -1279,6 +1290,7 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
int ret, op_ret;
@@ -15037,7 +16236,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
retry:
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
-@@ -1330,7 +1342,7 @@
+@@ -1330,7 +1342,7 @@ retry_private:
ret = -EINVAL;
goto out_unlock;
}
@@ -15046,7 +16245,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
if (++ret >= nr_wake)
break;
}
-@@ -1344,7 +1356,7 @@
+@@ -1344,7 +1356,7 @@ retry_private:
ret = -EINVAL;
goto out_unlock;
}
@@ -15055,7 +16254,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
if (++op_ret >= nr_wake2)
break;
}
-@@ -1354,6 +1366,7 @@
+@@ -1354,6 +1366,7 @@ retry_private:
out_unlock:
double_unlock_hb(hb1, hb2);
@@ -15063,7 +16262,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
out_put_keys:
put_futex_key(&key2);
out_put_key1:
-@@ -1513,6 +1526,7 @@
+@@ -1513,6 +1526,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
@@ -15071,7 +16270,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
if (requeue_pi) {
/*
-@@ -1689,7 +1703,7 @@
+@@ -1689,7 +1703,7 @@ retry_private:
* woken by futex_unlock_pi().
*/
if (++task_count <= nr_wake && !requeue_pi) {
@@ -15080,7 +16279,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
continue;
}
-@@ -1715,6 +1729,16 @@
+@@ -1715,6 +1729,16 @@ retry_private:
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -15097,7 +16296,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
-@@ -1729,6 +1753,7 @@
+@@ -1729,6 +1753,7 @@ retry_private:
out_unlock:
free_pi_state(pi_state);
double_unlock_hb(hb1, hb2);
@@ -15105,24 +16304,43 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
hb_waiters_dec(hb2);
/*
-@@ -2422,7 +2447,15 @@
+@@ -2422,13 +2447,22 @@ retry:
*/
match = futex_top_waiter(hb, &key);
if (match) {
- ret = wake_futex_pi(uaddr, uval, match);
-+ ret = wake_futex_pi(uaddr, uval, match, hb);
++ ret = wake_futex_pi(uaddr, uval, match, hb);
+
-+ /*
-+ * In case of success wake_futex_pi dropped the hash
-+ * bucket lock.
-+ */
-+ if (!ret)
-+ goto out_putkey;
++ /*
++ * In case of success wake_futex_pi dropped the hash
++ * bucket lock.
++ */
++ if (!ret)
++ goto out_putkey;
+
/*
* The atomic access to the futex value generated a
* pagefault, so retry the user-access and the wakeup:
-@@ -2458,6 +2491,7 @@
+ */
+ if (ret == -EFAULT)
+ goto pi_faulted;
++
+ /*
+ * A unconditional UNLOCK_PI op raced against a waiter
+ * setting the FUTEX_WAITERS bit. Try again.
+@@ -2438,6 +2472,11 @@ retry:
+ put_futex_key(&key);
+ goto retry;
+ }
++
++ /*
++ * wake_futex_pi has detected invalid state. Tell user
++ * space.
++ */
+ goto out_unlock;
+ }
+
+@@ -2458,6 +2497,7 @@ retry:
out_unlock:
spin_unlock(&hb->lock);
@@ -15130,7 +16348,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
put_futex_key(&key);
return ret;
-@@ -2568,7 +2602,7 @@
+@@ -2568,7 +2608,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
@@ -15139,7 +16357,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -2593,10 +2627,7 @@
+@@ -2593,10 +2633,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -15151,7 +16369,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
-@@ -2627,20 +2658,55 @@
+@@ -2627,20 +2664,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -15218,7 +16436,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -2649,14 +2715,15 @@
+@@ -2649,14 +2721,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -15236,7 +16454,7 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
}
} else {
/*
-@@ -2669,7 +2736,8 @@
+@@ -2669,7 +2742,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
debug_rt_mutex_free_waiter(&rt_waiter);
@@ -15246,10 +16464,11 @@ diff -Nur linux-4.1.26.orig/kernel/futex.c linux-4.1.26/kernel/futex.c
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
-diff -Nur linux-4.1.26.orig/kernel/irq/handle.c linux-4.1.26/kernel/irq/handle.c
---- linux-4.1.26.orig/kernel/irq/handle.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/irq/handle.c 2016-06-19 15:30:58.691297504 +0200
-@@ -133,6 +133,8 @@
+diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
+index 635480270858..26a63672c263 100644
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -133,6 +133,8 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
@@ -15258,7 +16477,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/handle.c linux-4.1.26/kernel/irq/handle.c
irqreturn_t retval = IRQ_NONE;
unsigned int flags = 0, irq = desc->irq_data.irq;
-@@ -173,7 +175,11 @@
+@@ -173,7 +175,11 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
action = action->next;
} while (action);
@@ -15271,9 +16490,10 @@ diff -Nur linux-4.1.26.orig/kernel/irq/handle.c linux-4.1.26/kernel/irq/handle.c
if (!noirqdebug)
note_interrupt(irq, desc, retval);
-diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
---- linux-4.1.26.orig/kernel/irq/manage.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/irq/manage.c 2016-06-19 15:30:58.691297504 +0200
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index e68932bb308e..79c55c26eaee 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
@@ -22,6 +22,7 @@
#include "internals.h"
@@ -15282,7 +16502,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
-@@ -30,6 +31,7 @@
+@@ -30,6 +31,7 @@ static int __init setup_forced_irqthreads(char *arg)
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
@@ -15290,7 +16510,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
#endif
static void __synchronize_hardirq(struct irq_desc *desc)
-@@ -179,6 +181,62 @@
+@@ -179,6 +181,62 @@ static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
@@ -15353,7 +16573,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
-@@ -218,7 +276,17 @@
+@@ -218,7 +276,17 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
@@ -15371,7 +16591,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
}
irqd_set(data, IRQD_AFFINITY_SET);
-@@ -256,10 +324,8 @@
+@@ -256,10 +324,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
@@ -15383,7 +16603,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
-@@ -281,6 +347,13 @@
+@@ -281,6 +347,13 @@ out:
kref_put(&notify->kref, notify->release);
}
@@ -15397,7 +16617,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
-@@ -310,6 +383,8 @@
+@@ -310,6 +383,8 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
notify->irq = irq;
kref_init(&notify->kref);
INIT_WORK(&notify->work, irq_affinity_notify);
@@ -15406,7 +16626,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
}
raw_spin_lock_irqsave(&desc->lock, flags);
-@@ -697,6 +772,12 @@
+@@ -697,6 +772,12 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
return IRQ_NONE;
}
@@ -15419,7 +16639,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
static int irq_wait_for_interrupt(struct irqaction *action)
{
set_current_state(TASK_INTERRUPTIBLE);
-@@ -723,7 +804,8 @@
+@@ -723,7 +804,8 @@ static int irq_wait_for_interrupt(struct irqaction *action)
static void irq_finalize_oneshot(struct irq_desc *desc,
struct irqaction *action)
{
@@ -15429,7 +16649,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
return;
again:
chip_bus_lock(desc);
-@@ -825,7 +907,15 @@
+@@ -825,7 +907,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
@@ -15446,7 +16666,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
return ret;
}
-@@ -877,6 +967,18 @@
+@@ -877,6 +967,18 @@ static void irq_thread_dtor(struct callback_head *unused)
irq_finalize_oneshot(desc, action);
}
@@ -15465,13 +16685,14 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
/*
* Interrupt handler thread
*/
-@@ -907,7 +1009,15 @@
+@@ -907,7 +1009,15 @@ static int irq_thread(void *data)
action_ret = handler_fn(desc, action);
if (action_ret == IRQ_HANDLED)
atomic_inc(&desc->threads_handled);
+-
+ if (action_ret == IRQ_WAKE_THREAD)
+ irq_wake_secondary(desc, action);
-
++
+#ifdef CONFIG_PREEMPT_RT_FULL
+ migrate_disable();
+ add_interrupt_randomness(action->irq, 0,
@@ -15481,7 +16702,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
wake_threads_waitq(desc);
}
-@@ -951,20 +1061,36 @@
+@@ -951,20 +1061,36 @@ void irq_wake_thread(unsigned int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
@@ -15501,7 +16722,6 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
- set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
- new->thread_fn = new->handler;
- new->handler = irq_default_primary_handler;
-- }
+ /*
+ * Handle the case where we have a real primary handler and a
+ * thread handler. We force thread them as well by creating a
@@ -15517,7 +16737,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
+ new->secondary->dev_id = new->dev_id;
+ new->secondary->irq = new->irq;
+ new->secondary->name = new->name;
-+ }
+ }
+ /* Deal with the primary handler */
+ set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+ new->thread_fn = new->handler;
@@ -15526,7 +16746,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
}
static int irq_request_resources(struct irq_desc *desc)
-@@ -984,6 +1110,48 @@
+@@ -984,6 +1110,48 @@ static void irq_release_resources(struct irq_desc *desc)
c->irq_release_resources(d);
}
@@ -15575,7 +16795,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
-@@ -1004,6 +1172,8 @@
+@@ -1004,6 +1172,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (!try_module_get(desc->owner))
return -ENODEV;
@@ -15584,7 +16804,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
/*
* Check whether the interrupt nests into another interrupt
* thread.
-@@ -1021,8 +1191,11 @@
+@@ -1021,8 +1191,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
new->handler = irq_nested_primary_handler;
} else {
@@ -15598,7 +16818,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
}
/*
-@@ -1031,37 +1204,14 @@
+@@ -1031,37 +1204,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* thread.
*/
if (new->thread_fn && !nested) {
@@ -15642,7 +16862,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
}
if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-@@ -1221,6 +1371,9 @@
+@@ -1221,6 +1371,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
@@ -15652,7 +16872,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
/* Set default affinity mask once everything is setup */
setup_affinity(irq, desc, mask);
-@@ -1234,7 +1387,6 @@
+@@ -1234,7 +1387,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irq, nmsk, omsk);
}
@@ -15660,7 +16880,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
*old_ptr = new;
irq_pm_install_action(desc, new);
-@@ -1260,6 +1412,8 @@
+@@ -1260,6 +1412,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
if (new->thread)
wake_up_process(new->thread);
@@ -15669,7 +16889,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
register_irq_proc(irq, desc);
new->dir = NULL;
-@@ -1290,6 +1444,13 @@
+@@ -1290,6 +1444,13 @@ out_thread:
kthread_stop(t);
put_task_struct(t);
}
@@ -15683,7 +16903,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
out_mput:
module_put(desc->owner);
return ret;
-@@ -1397,9 +1558,14 @@
+@@ -1397,9 +1558,14 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
if (action->thread) {
kthread_stop(action->thread);
put_task_struct(action->thread);
@@ -15698,7 +16918,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
return action;
}
-@@ -1543,8 +1709,10 @@
+@@ -1543,8 +1709,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
retval = __setup_irq(irq, desc, action);
chip_bus_sync_unlock(desc);
@@ -15710,10 +16930,11 @@ diff -Nur linux-4.1.26.orig/kernel/irq/manage.c linux-4.1.26/kernel/irq/manage.c
#ifdef CONFIG_DEBUG_SHIRQ_FIXME
if (!retval && (irqflags & IRQF_SHARED)) {
-diff -Nur linux-4.1.26.orig/kernel/irq/settings.h linux-4.1.26/kernel/irq/settings.h
---- linux-4.1.26.orig/kernel/irq/settings.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/irq/settings.h 2016-06-19 15:30:58.691297504 +0200
-@@ -15,6 +15,7 @@
+diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
+index 3320b84cc60f..34b803b89d41 100644
+--- a/kernel/irq/settings.h
++++ b/kernel/irq/settings.h
+@@ -15,6 +15,7 @@ enum {
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQ_IS_POLLED = IRQ_IS_POLLED,
@@ -15721,7 +16942,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/settings.h linux-4.1.26/kernel/irq/settin
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
-@@ -28,6 +29,7 @@
+@@ -28,6 +29,7 @@ enum {
#define IRQ_NESTED_THREAD GOT_YOU_MORON
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#define IRQ_IS_POLLED GOT_YOU_MORON
@@ -15729,7 +16950,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/settings.h linux-4.1.26/kernel/irq/settin
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
-@@ -38,6 +40,16 @@
+@@ -38,6 +40,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}
@@ -15746,10 +16967,11 @@ diff -Nur linux-4.1.26.orig/kernel/irq/settings.h linux-4.1.26/kernel/irq/settin
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
-diff -Nur linux-4.1.26.orig/kernel/irq/spurious.c linux-4.1.26/kernel/irq/spurious.c
---- linux-4.1.26.orig/kernel/irq/spurious.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/irq/spurious.c 2016-06-19 15:30:58.691297504 +0200
-@@ -444,6 +444,10 @@
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index e2514b0e439e..903a69c45689 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -444,6 +444,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
static int __init irqfixup_setup(char *str)
{
@@ -15760,7 +16982,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq/spurious.c linux-4.1.26/kernel/irq/spurio
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
-@@ -456,6 +460,10 @@
+@@ -456,6 +460,10 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
@@ -15771,9 +16993,10 @@ diff -Nur linux-4.1.26.orig/kernel/irq/spurious.c linux-4.1.26/kernel/irq/spurio
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
-diff -Nur linux-4.1.26.orig/kernel/irq_work.c linux-4.1.26/kernel/irq_work.c
---- linux-4.1.26.orig/kernel/irq_work.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/irq_work.c 2016-06-19 15:30:58.691297504 +0200
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index cbf9fb899d92..58cf46638ca0 100644
+--- a/kernel/irq_work.c
++++ b/kernel/irq_work.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
@@ -15782,7 +17005,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq_work.c linux-4.1.26/kernel/irq_work.c
#include <asm/processor.h>
-@@ -65,6 +66,8 @@
+@@ -65,6 +66,8 @@ void __weak arch_irq_work_raise(void)
*/
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
@@ -15791,7 +17014,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq_work.c linux-4.1.26/kernel/irq_work.c
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(cpu));
-@@ -75,7 +78,12 @@
+@@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (!irq_work_claim(work))
return false;
@@ -15805,7 +17028,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq_work.c linux-4.1.26/kernel/irq_work.c
arch_send_call_function_single_ipi(cpu);
return true;
-@@ -86,6 +94,9 @@
+@@ -86,6 +94,9 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on);
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
@@ -15815,7 +17038,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq_work.c linux-4.1.26/kernel/irq_work.c
/* Only queue if not already pending */
if (!irq_work_claim(work))
return false;
-@@ -93,13 +104,15 @@
+@@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *work)
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
@@ -15838,7 +17061,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq_work.c linux-4.1.26/kernel/irq_work.c
arch_irq_work_raise();
}
-@@ -116,9 +129,8 @@
+@@ -116,9 +129,8 @@ bool irq_work_needs_cpu(void)
raised = this_cpu_ptr(&raised_list);
lazy = this_cpu_ptr(&lazy_list);
@@ -15850,7 +17073,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq_work.c linux-4.1.26/kernel/irq_work.c
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
-@@ -132,7 +144,7 @@
+@@ -132,7 +144,7 @@ static void irq_work_run_list(struct llist_head *list)
struct irq_work *work;
struct llist_node *llnode;
@@ -15859,7 +17082,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq_work.c linux-4.1.26/kernel/irq_work.c
if (llist_empty(list))
return;
-@@ -169,7 +181,16 @@
+@@ -169,7 +181,16 @@ static void irq_work_run_list(struct llist_head *list)
void irq_work_run(void)
{
irq_work_run_list(this_cpu_ptr(&raised_list));
@@ -15877,7 +17100,7 @@ diff -Nur linux-4.1.26.orig/kernel/irq_work.c linux-4.1.26/kernel/irq_work.c
}
EXPORT_SYMBOL_GPL(irq_work_run);
-@@ -179,8 +200,17 @@
+@@ -179,8 +200,17 @@ void irq_work_tick(void)
if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
irq_work_run_list(raised);
@@ -15895,82 +17118,11 @@ diff -Nur linux-4.1.26.orig/kernel/irq_work.c linux-4.1.26/kernel/irq_work.c
/*
* Synchronize against the irq_work @entry, ensures the entry is not
-diff -Nur linux-4.1.26.orig/kernel/Kconfig.locks linux-4.1.26/kernel/Kconfig.locks
---- linux-4.1.26.orig/kernel/Kconfig.locks 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/Kconfig.locks 2016-06-19 15:30:58.683297195 +0200
-@@ -225,11 +225,11 @@
-
- config MUTEX_SPIN_ON_OWNER
- def_bool y
-- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
-+ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
-
- config RWSEM_SPIN_ON_OWNER
- def_bool y
-- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
-+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
-
- config LOCK_SPIN_ON_OWNER
- def_bool y
-diff -Nur linux-4.1.26.orig/kernel/Kconfig.preempt linux-4.1.26/kernel/Kconfig.preempt
---- linux-4.1.26.orig/kernel/Kconfig.preempt 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/Kconfig.preempt 2016-06-19 15:30:58.683297195 +0200
-@@ -1,3 +1,16 @@
-+config PREEMPT
-+ bool
-+ select PREEMPT_COUNT
-+
-+config PREEMPT_RT_BASE
-+ bool
-+ select PREEMPT
-+
-+config HAVE_PREEMPT_LAZY
-+ bool
-+
-+config PREEMPT_LAZY
-+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
-
- choice
- prompt "Preemption Model"
-@@ -33,9 +46,9 @@
-
- Select this if you are building a kernel for a desktop system.
-
--config PREEMPT
-+config PREEMPT__LL
- bool "Preemptible Kernel (Low-Latency Desktop)"
-- select PREEMPT_COUNT
-+ select PREEMPT
- select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
- help
- This option reduces the latency of the kernel by making
-@@ -52,6 +65,22 @@
- embedded system with latency requirements in the milliseconds
- range.
-
-+config PREEMPT_RTB
-+ bool "Preemptible Kernel (Basic RT)"
-+ select PREEMPT_RT_BASE
-+ help
-+ This option is basically the same as (Low-Latency Desktop) but
-+ enables changes which are preliminary for the full preemptible
-+ RT kernel.
-+
-+config PREEMPT_RT_FULL
-+ bool "Fully Preemptible Kernel (RT)"
-+ depends on IRQ_FORCED_THREADING
-+ select PREEMPT_RT_BASE
-+ select PREEMPT_RCU
-+ help
-+ All and everything
-+
- endchoice
-
- config PREEMPT_COUNT
-diff -Nur linux-4.1.26.orig/kernel/ksysfs.c linux-4.1.26/kernel/ksysfs.c
---- linux-4.1.26.orig/kernel/ksysfs.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/ksysfs.c 2016-06-19 15:30:58.691297504 +0200
-@@ -136,6 +136,15 @@
+diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
+index 6683ccef9fff..d6fc8eeaab8f 100644
+--- a/kernel/ksysfs.c
++++ b/kernel/ksysfs.c
+@@ -136,6 +136,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
#endif /* CONFIG_KEXEC */
@@ -15986,7 +17138,7 @@ diff -Nur linux-4.1.26.orig/kernel/ksysfs.c linux-4.1.26/kernel/ksysfs.c
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
-@@ -203,6 +212,9 @@
+@@ -203,6 +212,9 @@ static struct attribute * kernel_attrs[] = {
&vmcoreinfo_attr.attr,
#endif
&rcu_expedited_attr.attr,
@@ -15996,9 +17148,45 @@ diff -Nur linux-4.1.26.orig/kernel/ksysfs.c linux-4.1.26/kernel/ksysfs.c
NULL
};
-diff -Nur linux-4.1.26.orig/kernel/locking/lglock.c linux-4.1.26/kernel/locking/lglock.c
---- linux-4.1.26.orig/kernel/locking/lglock.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/locking/lglock.c 2016-06-19 15:30:58.691297504 +0200
+diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
+index de7a416cca2a..ab269cf0475a 100644
+--- a/kernel/locking/Makefile
++++ b/kernel/locking/Makefile
+@@ -1,5 +1,5 @@
+
+-obj-y += mutex.o semaphore.o rwsem.o
++obj-y += semaphore.o
+
+ ifdef CONFIG_FUNCTION_TRACER
+ CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
+@@ -8,7 +8,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
+ endif
+
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
++obj-y += mutex.o
+ obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
++obj-y += rwsem.o
++endif
+ obj-$(CONFIG_LOCKDEP) += lockdep.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+@@ -22,8 +26,11 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+ obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+ obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
++endif
+ obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
+ obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
+ obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+diff --git a/kernel/locking/lglock.c b/kernel/locking/lglock.c
+index 86ae2aebf004..9397974b142f 100644
+--- a/kernel/locking/lglock.c
++++ b/kernel/locking/lglock.c
@@ -4,6 +4,15 @@
#include <linux/cpu.h>
#include <linux/string.h>
@@ -16088,7 +17276,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/lglock.c linux-4.1.26/kernel/locking/
}
EXPORT_SYMBOL(lg_local_unlock_cpu);
-@@ -64,12 +82,12 @@
+@@ -64,12 +82,12 @@ void lg_global_lock(struct lglock *lg)
{
int i;
@@ -16104,7 +17292,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/lglock.c linux-4.1.26/kernel/locking/
}
}
EXPORT_SYMBOL(lg_global_lock);
-@@ -80,10 +98,35 @@
+@@ -80,10 +98,35 @@ void lg_global_unlock(struct lglock *lg)
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
for_each_possible_cpu(i) {
@@ -16143,10 +17331,11 @@ diff -Nur linux-4.1.26.orig/kernel/locking/lglock.c linux-4.1.26/kernel/locking/
+ }
+}
+#endif
-diff -Nur linux-4.1.26.orig/kernel/locking/lockdep.c linux-4.1.26/kernel/locking/lockdep.c
---- linux-4.1.26.orig/kernel/locking/lockdep.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/locking/lockdep.c 2016-06-19 15:30:58.691297504 +0200
-@@ -3563,6 +3563,7 @@
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index aaeae885d9af..577f02617c63 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3563,6 +3563,7 @@ static void check_flags(unsigned long flags)
}
}
@@ -16154,7 +17343,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/lockdep.c linux-4.1.26/kernel/locking
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -3577,6 +3578,7 @@
+@@ -3577,6 +3578,7 @@ static void check_flags(unsigned long flags)
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
@@ -16162,9 +17351,10 @@ diff -Nur linux-4.1.26.orig/kernel/locking/lockdep.c linux-4.1.26/kernel/locking
if (!debug_locks)
print_irqtrace_events(current);
-diff -Nur linux-4.1.26.orig/kernel/locking/locktorture.c linux-4.1.26/kernel/locking/locktorture.c
---- linux-4.1.26.orig/kernel/locking/locktorture.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/locking/locktorture.c 2016-06-19 15:30:58.695297658 +0200
+diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
+index ec8cce259779..aa60d919e336 100644
+--- a/kernel/locking/locktorture.c
++++ b/kernel/locking/locktorture.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/kthread.h>
@@ -16173,43 +17363,11 @@ diff -Nur linux-4.1.26.orig/kernel/locking/locktorture.c linux-4.1.26/kernel/loc
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
-diff -Nur linux-4.1.26.orig/kernel/locking/Makefile linux-4.1.26/kernel/locking/Makefile
---- linux-4.1.26.orig/kernel/locking/Makefile 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/locking/Makefile 2016-06-19 15:30:58.691297504 +0200
-@@ -1,5 +1,5 @@
-
--obj-y += mutex.o semaphore.o rwsem.o
-+obj-y += semaphore.o
-
- ifdef CONFIG_FUNCTION_TRACER
- CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
-@@ -8,7 +8,11 @@
- CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
- endif
-
-+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
-+obj-y += mutex.o
- obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
-+obj-y += rwsem.o
-+endif
- obj-$(CONFIG_LOCKDEP) += lockdep.o
- ifeq ($(CONFIG_PROC_FS),y)
- obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -22,8 +26,11 @@
- obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
- obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
- obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
-+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
- obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
- obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
-+endif
- obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
-+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
- obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
- obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
-diff -Nur linux-4.1.26.orig/kernel/locking/rt.c linux-4.1.26/kernel/locking/rt.c
---- linux-4.1.26.orig/kernel/locking/rt.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/kernel/locking/rt.c 2016-06-19 15:30:58.695297658 +0200
+diff --git a/kernel/locking/rt.c b/kernel/locking/rt.c
+new file mode 100644
+index 000000000000..c236efa4834c
+--- /dev/null
++++ b/kernel/locking/rt.c
@@ -0,0 +1,461 @@
+/*
+ * kernel/rt.c
@@ -16672,9 +17830,10 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rt.c linux-4.1.26/kernel/locking/rt.c
+ return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
-diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking/rtmutex.c
---- linux-4.1.26.orig/kernel/locking/rtmutex.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/locking/rtmutex.c 2016-06-19 15:30:58.695297658 +0200
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index b025295f4966..e0b0d9b419b5 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
@@ -7,6 +7,11 @@
* Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
@@ -16695,7 +17854,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
#include "rtmutex_common.h"
-@@ -69,6 +75,12 @@
+@@ -69,6 +75,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
clear_rt_mutex_waiters(lock);
}
@@ -16708,7 +17867,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
/*
* We can speed up the acquire/release, if the architecture
* supports cmpxchg and if there's no debugging state to be set up
-@@ -300,7 +312,7 @@
+@@ -300,7 +312,7 @@ static void __rt_mutex_adjust_prio(struct task_struct *task)
* of task. We do not use the spin_xx_mutex() variants here as we are
* outside of the debug path.)
*/
@@ -16717,7 +17876,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
{
unsigned long flags;
-@@ -335,6 +347,14 @@
+@@ -335,6 +347,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}
@@ -16732,7 +17891,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -342,7 +362,8 @@
+@@ -342,7 +362,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
@@ -16742,7 +17901,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
}
/*
-@@ -479,7 +500,7 @@
+@@ -479,7 +500,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* reached or the state of the chain has changed while we
* dropped the locks.
*/
@@ -16751,7 +17910,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
goto out_unlock_pi;
/*
-@@ -641,13 +662,16 @@
+@@ -641,13 +662,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
@@ -16770,7 +17929,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
raw_spin_unlock(&lock->wait_lock);
return 0;
}
-@@ -740,6 +764,25 @@
+@@ -740,6 +764,25 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
return ret;
}
@@ -16796,7 +17955,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
/*
* Try to take an rt-mutex
*
-@@ -750,8 +793,9 @@
+@@ -750,8 +793,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* @waiter: The waiter that is queued to the lock's wait list if the
* callsite called task_blocked_on_lock(), otherwise NULL
*/
@@ -16808,7 +17967,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
{
unsigned long flags;
-@@ -790,8 +834,10 @@
+@@ -790,8 +834,10 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
* If waiter is not the highest priority waiter of
* @lock, give up.
*/
@@ -16820,7 +17979,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
/*
* We can acquire the lock. Remove the waiter from the
-@@ -809,14 +855,10 @@
+@@ -809,14 +855,10 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
* not need to be dequeued.
*/
if (rt_mutex_has_waiters(lock)) {
@@ -16838,7 +17997,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -865,6 +907,347 @@
+@@ -865,6 +907,347 @@ takeit:
return 1;
}
@@ -17186,7 +18345,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
/*
* Task blocks on lock.
*
-@@ -896,6 +1279,23 @@
+@@ -896,6 +1279,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
return -EDEADLK;
raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -17210,7 +18369,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
-@@ -919,7 +1319,7 @@
+@@ -919,7 +1319,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
rt_mutex_enqueue_pi(owner, waiter);
__rt_mutex_adjust_prio(owner);
@@ -17219,7 +18378,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
-@@ -957,8 +1357,9 @@
+@@ -957,8 +1357,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
/*
* Wake up the next waiter on the lock.
*
@@ -17231,7 +18390,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
*
* Called with lock->wait_lock held.
*/
-@@ -996,7 +1397,7 @@
+@@ -996,7 +1397,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
* long as we hold lock->wait_lock. The waiter task needs to
* acquire it in order to dequeue the waiter.
*/
@@ -17240,7 +18399,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
}
/*
-@@ -1010,7 +1411,7 @@
+@@ -1010,7 +1411,7 @@ static void remove_waiter(struct rt_mutex *lock,
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
@@ -17249,7 +18408,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
unsigned long flags;
raw_spin_lock_irqsave(&current->pi_lock, flags);
-@@ -1035,7 +1436,8 @@
+@@ -1035,7 +1436,8 @@ static void remove_waiter(struct rt_mutex *lock,
__rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
@@ -17259,7 +18418,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-@@ -1071,17 +1473,17 @@
+@@ -1071,17 +1473,17 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
@@ -17279,7 +18438,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
next_lock, NULL, task);
}
-@@ -1099,7 +1501,8 @@
+@@ -1099,7 +1501,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -17289,7 +18448,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
{
int ret = 0;
-@@ -1122,6 +1525,12 @@
+@@ -1122,6 +1525,12 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
break;
}
@@ -17302,7 +18461,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
-@@ -1156,25 +1565,102 @@
+@@ -1156,25 +1565,102 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
}
}
@@ -17409,7 +18568,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
raw_spin_unlock(&lock->wait_lock);
return 0;
}
-@@ -1192,13 +1678,23 @@
+@@ -1192,13 +1678,23 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
if (likely(!ret))
/* sleep on the mutex */
@@ -17435,7 +18594,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
}
/*
-@@ -1255,7 +1751,7 @@
+@@ -1255,7 +1751,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
/*
* Slow path to release a rt-mutex:
*/
@@ -17444,7 +18603,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
rt_mutex_slowunlock(struct rt_mutex *lock)
{
raw_spin_lock(&lock->wait_lock);
-@@ -1298,7 +1794,7 @@
+@@ -1298,7 +1794,7 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
while (!rt_mutex_has_waiters(lock)) {
/* Drops lock->wait_lock ! */
if (unlock_rt_mutex_safe(lock) == true)
@@ -17453,7 +18612,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
/* Relock the rtmutex and try again */
raw_spin_lock(&lock->wait_lock);
}
-@@ -1311,8 +1807,7 @@
+@@ -1311,8 +1807,7 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
raw_spin_unlock(&lock->wait_lock);
@@ -17463,7 +18622,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
}
/*
-@@ -1323,31 +1818,36 @@
+@@ -1323,31 +1818,36 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -17504,7 +18663,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
}
static inline int
-@@ -1363,12 +1863,14 @@
+@@ -1363,12 +1863,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
@@ -17523,7 +18682,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
}
/**
-@@ -1380,7 +1882,7 @@
+@@ -1380,7 +1882,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
{
might_sleep();
@@ -17532,7 +18691,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
-@@ -1397,7 +1899,7 @@
+@@ -1397,7 +1899,7 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
might_sleep();
@@ -17541,7 +18700,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1410,11 +1912,30 @@
+@@ -1410,11 +1912,30 @@ int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
might_sleep();
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
@@ -17573,7 +18732,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
* rt_mutex_timed_lock - lock a rt_mutex interruptible
* the timeout structure is provided
* by the caller
-@@ -1434,6 +1955,7 @@
+@@ -1434,6 +1955,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -17581,7 +18740,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -1463,6 +1985,22 @@
+@@ -1463,6 +1985,22 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
/**
@@ -17604,7 +18763,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
* rt_mutex_destroy - mark a mutex unusable
* @lock: the mutex to be destroyed
*
-@@ -1492,13 +2030,12 @@
+@@ -1492,13 +2030,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
@@ -17619,7 +18778,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1513,7 +2050,7 @@
+@@ -1513,7 +2050,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
@@ -17628,7 +18787,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
rt_mutex_deadlock_account_lock(lock, proxy_owner);
-@@ -1561,6 +2098,35 @@
+@@ -1561,6 +2098,35 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
return 1;
}
@@ -17664,7 +18823,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
/* We enforce deadlock detection for futexes */
ret = task_blocks_on_rt_mutex(lock, waiter, task,
RT_MUTEX_FULL_CHAINWALK);
-@@ -1575,7 +2141,7 @@
+@@ -1575,7 +2141,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
ret = 0;
}
@@ -17673,7 +18832,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
remove_waiter(lock, waiter);
raw_spin_unlock(&lock->wait_lock);
-@@ -1631,7 +2197,7 @@
+@@ -1631,7 +2197,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
set_current_state(TASK_INTERRUPTIBLE);
/* sleep on the mutex */
@@ -17682,7 +18841,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
if (unlikely(ret))
remove_waiter(lock, waiter);
-@@ -1646,3 +2212,89 @@
+@@ -1646,3 +2212,89 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
return ret;
}
@@ -17772,10 +18931,11 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex.c linux-4.1.26/kernel/locking
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
+#endif
-diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex_common.h linux-4.1.26/kernel/locking/rtmutex_common.h
---- linux-4.1.26.orig/kernel/locking/rtmutex_common.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/locking/rtmutex_common.h 2016-06-19 15:30:58.695297658 +0200
-@@ -49,6 +49,7 @@
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 855212501407..4d317e9a5d0f 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -49,6 +49,7 @@ struct rt_mutex_waiter {
struct rb_node pi_tree_entry;
struct task_struct *task;
struct rt_mutex *lock;
@@ -17783,7 +18943,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex_common.h linux-4.1.26/kernel/
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
-@@ -119,6 +120,9 @@
+@@ -119,6 +120,9 @@ enum rtmutex_chainwalk {
/*
* PI-futex support (proxy locking functions, etc.):
*/
@@ -17793,7 +18953,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex_common.h linux-4.1.26/kernel/
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
-@@ -132,10 +136,24 @@
+@@ -132,10 +136,24 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter);
extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
@@ -17818,10 +18978,11 @@ diff -Nur linux-4.1.26.orig/kernel/locking/rtmutex_common.h linux-4.1.26/kernel/
+}
+
#endif
-diff -Nur linux-4.1.26.orig/kernel/locking/spinlock.c linux-4.1.26/kernel/locking/spinlock.c
---- linux-4.1.26.orig/kernel/locking/spinlock.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/locking/spinlock.c 2016-06-19 15:30:58.695297658 +0200
-@@ -124,8 +124,11 @@
+diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
+index db3ccb1dd614..909779647bd1 100644
+--- a/kernel/locking/spinlock.c
++++ b/kernel/locking/spinlock.c
+@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
* __[spin|read|write]_lock_bh()
*/
BUILD_LOCK_OPS(spin, raw_spinlock);
@@ -17833,7 +18994,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/spinlock.c linux-4.1.26/kernel/lockin
#endif
-@@ -209,6 +212,8 @@
+@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
@@ -17842,7 +19003,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/spinlock.c linux-4.1.26/kernel/lockin
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
-@@ -353,6 +358,8 @@
+@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
@@ -17851,10 +19012,11 @@ diff -Nur linux-4.1.26.orig/kernel/locking/spinlock.c linux-4.1.26/kernel/lockin
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
-diff -Nur linux-4.1.26.orig/kernel/locking/spinlock_debug.c linux-4.1.26/kernel/locking/spinlock_debug.c
---- linux-4.1.26.orig/kernel/locking/spinlock_debug.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/locking/spinlock_debug.c 2016-06-19 15:30:58.695297658 +0200
-@@ -31,6 +31,7 @@
+diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
+index 0374a596cffa..94970338d518 100644
+--- a/kernel/locking/spinlock_debug.c
++++ b/kernel/locking/spinlock_debug.c
+@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
EXPORT_SYMBOL(__raw_spin_lock_init);
@@ -17862,7 +19024,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/spinlock_debug.c linux-4.1.26/kernel/
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
{
-@@ -48,6 +49,7 @@
+@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
}
EXPORT_SYMBOL(__rwlock_init);
@@ -17870,7 +19032,7 @@ diff -Nur linux-4.1.26.orig/kernel/locking/spinlock_debug.c linux-4.1.26/kernel/
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
-@@ -159,6 +161,7 @@
+@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
arch_spin_unlock(&lock->raw_lock);
}
@@ -17878,16 +19040,17 @@ diff -Nur linux-4.1.26.orig/kernel/locking/spinlock_debug.c linux-4.1.26/kernel/
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
-@@ -300,3 +303,5 @@
+@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
+
+#endif
-diff -Nur linux-4.1.26.orig/kernel/panic.c linux-4.1.26/kernel/panic.c
---- linux-4.1.26.orig/kernel/panic.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/panic.c 2016-06-19 15:30:58.695297658 +0200
-@@ -399,9 +399,11 @@
+diff --git a/kernel/panic.c b/kernel/panic.c
+index a4f7820f5930..cd91cec1f29a 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -399,9 +399,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
@@ -17899,10 +19062,11 @@ diff -Nur linux-4.1.26.orig/kernel/panic.c linux-4.1.26/kernel/panic.c
oops_id++;
return 0;
-diff -Nur linux-4.1.26.orig/kernel/power/hibernate.c linux-4.1.26/kernel/power/hibernate.c
---- linux-4.1.26.orig/kernel/power/hibernate.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/power/hibernate.c 2016-06-19 15:30:58.695297658 +0200
-@@ -285,6 +285,8 @@
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 2329daae5255..b8f41a3635fd 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -285,6 +285,8 @@ static int create_image(int platform_mode)
local_irq_disable();
@@ -17911,7 +19075,7 @@ diff -Nur linux-4.1.26.orig/kernel/power/hibernate.c linux-4.1.26/kernel/power/h
error = syscore_suspend();
if (error) {
printk(KERN_ERR "PM: Some system devices failed to power down, "
-@@ -314,6 +316,7 @@
+@@ -314,6 +316,7 @@ static int create_image(int platform_mode)
syscore_resume();
Enable_irqs:
@@ -17919,7 +19083,7 @@ diff -Nur linux-4.1.26.orig/kernel/power/hibernate.c linux-4.1.26/kernel/power/h
local_irq_enable();
Enable_cpus:
-@@ -437,6 +440,7 @@
+@@ -437,6 +440,7 @@ static int resume_target_kernel(bool platform_mode)
goto Enable_cpus;
local_irq_disable();
@@ -17927,7 +19091,7 @@ diff -Nur linux-4.1.26.orig/kernel/power/hibernate.c linux-4.1.26/kernel/power/h
error = syscore_suspend();
if (error)
-@@ -470,6 +474,7 @@
+@@ -470,6 +474,7 @@ static int resume_target_kernel(bool platform_mode)
syscore_resume();
Enable_irqs:
@@ -17935,7 +19099,7 @@ diff -Nur linux-4.1.26.orig/kernel/power/hibernate.c linux-4.1.26/kernel/power/h
local_irq_enable();
Enable_cpus:
-@@ -555,6 +560,7 @@
+@@ -555,6 +560,7 @@ int hibernation_platform_enter(void)
goto Platform_finish;
local_irq_disable();
@@ -17943,7 +19107,7 @@ diff -Nur linux-4.1.26.orig/kernel/power/hibernate.c linux-4.1.26/kernel/power/h
syscore_suspend();
if (pm_wakeup_pending()) {
error = -EAGAIN;
-@@ -567,6 +573,7 @@
+@@ -567,6 +573,7 @@ int hibernation_platform_enter(void)
Power_up:
syscore_resume();
@@ -17951,10 +19115,11 @@ diff -Nur linux-4.1.26.orig/kernel/power/hibernate.c linux-4.1.26/kernel/power/h
local_irq_enable();
enable_nonboot_cpus();
-diff -Nur linux-4.1.26.orig/kernel/power/suspend.c linux-4.1.26/kernel/power/suspend.c
---- linux-4.1.26.orig/kernel/power/suspend.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/power/suspend.c 2016-06-19 15:30:58.695297658 +0200
-@@ -356,6 +356,8 @@
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 8d7a1ef72758..db920b1704b1 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -356,6 +356,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
@@ -17963,7 +19128,7 @@ diff -Nur linux-4.1.26.orig/kernel/power/suspend.c linux-4.1.26/kernel/power/sus
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
-@@ -370,6 +372,8 @@
+@@ -370,6 +372,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
syscore_resume();
}
@@ -17972,10 +19137,11 @@ diff -Nur linux-4.1.26.orig/kernel/power/suspend.c linux-4.1.26/kernel/power/sus
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
-diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/printk.c
---- linux-4.1.26.orig/kernel/printk/printk.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/printk/printk.c 2016-06-19 15:30:58.695297658 +0200
-@@ -1163,6 +1163,7 @@
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 3c1aca0c3543..398bf2bbd3bc 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1163,6 +1163,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
{
char *text;
int len = 0;
@@ -17983,7 +19149,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
-@@ -1174,7 +1175,14 @@
+@@ -1174,7 +1175,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
u64 seq;
u32 idx;
enum log_flags prev;
@@ -17999,7 +19165,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
if (clear_seq < log_first_seq) {
/* messages are gone, move to first available one */
clear_seq = log_first_seq;
-@@ -1195,6 +1203,14 @@
+@@ -1195,6 +1203,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
prev = msg->flags;
idx = log_next(idx);
seq++;
@@ -18014,7 +19180,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
}
/* move first record forward until length fits into the buffer */
-@@ -1208,6 +1224,14 @@
+@@ -1208,6 +1224,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
prev = msg->flags;
idx = log_next(idx);
seq++;
@@ -18029,7 +19195,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
}
/* last message fitting into this dump */
-@@ -1248,6 +1272,7 @@
+@@ -1248,6 +1272,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
@@ -18037,15 +19203,20 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
raw_spin_unlock_irq(&logbuf_lock);
kfree(text);
-@@ -1401,6 +1426,7 @@
+@@ -1401,6 +1426,12 @@ static void call_console_drivers(int level, const char *text, size_t len)
if (!console_drivers)
return;
++ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
++ if (in_irq() || in_nmi())
++ return;
++ }
++
+ migrate_disable();
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
-@@ -1413,6 +1439,7 @@
+@@ -1413,6 +1444,7 @@ static void call_console_drivers(int level, const char *text, size_t len)
continue;
con->write(con, text, len);
}
@@ -18053,7 +19224,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
}
/*
-@@ -1473,6 +1500,15 @@
+@@ -1473,6 +1505,15 @@ static inline int can_use_console(unsigned int cpu)
static int console_trylock_for_printk(void)
{
unsigned int cpu = smp_processor_id();
@@ -18069,7 +19240,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
if (!console_trylock())
return 0;
-@@ -1607,6 +1643,62 @@
+@@ -1607,6 +1648,62 @@ static size_t cont_print_text(char *text, size_t size)
return textlen;
}
@@ -18132,7 +19303,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
asmlinkage int vprintk_emit(int facility, int level,
const char *dict, size_t dictlen,
const char *fmt, va_list args)
-@@ -1623,6 +1715,13 @@
+@@ -1623,6 +1720,13 @@ asmlinkage int vprintk_emit(int facility, int level,
/* cpu currently holding logbuf_lock in this function */
static unsigned int logbuf_cpu = UINT_MAX;
@@ -18146,7 +19317,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
in_sched = true;
-@@ -1764,8 +1863,7 @@
+@@ -1764,8 +1868,7 @@ asmlinkage int vprintk_emit(int facility, int level,
* console_sem which would prevent anyone from printing to
* console
*/
@@ -18156,7 +19327,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
/*
* Try to acquire and then immediately release the console
* semaphore. The release will print out buffers and wake up
-@@ -1773,7 +1871,7 @@
+@@ -1773,7 +1876,7 @@ asmlinkage int vprintk_emit(int facility, int level,
*/
if (console_trylock_for_printk())
console_unlock();
@@ -18165,7 +19336,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
lockdep_on();
}
-@@ -1902,26 +2000,6 @@
+@@ -1902,26 +2005,6 @@ DEFINE_PER_CPU(printk_func_t, printk_func);
#endif /* CONFIG_PRINTK */
@@ -18192,7 +19363,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
static int __add_preferred_console(char *name, int idx, char *options,
char *brl_options)
{
-@@ -2143,11 +2221,16 @@
+@@ -2143,11 +2226,16 @@ static void console_cont_flush(char *text, size_t size)
goto out;
len = cont_print_text(text, size);
@@ -18209,7 +19380,7 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
return;
out:
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-@@ -2246,12 +2329,17 @@
+@@ -2246,12 +2334,17 @@ skip:
console_idx = log_next(console_idx);
console_seq++;
console_prev = msg->flags;
@@ -18227,10 +19398,23 @@ diff -Nur linux-4.1.26.orig/kernel/printk/printk.c linux-4.1.26/kernel/printk/pr
if (do_cond_resched)
cond_resched();
-diff -Nur linux-4.1.26.orig/kernel/ptrace.c linux-4.1.26/kernel/ptrace.c
---- linux-4.1.26.orig/kernel/ptrace.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/ptrace.c 2016-06-19 15:30:58.699297812 +0200
-@@ -129,7 +129,14 @@
+@@ -2304,6 +2397,11 @@ void console_unblank(void)
+ {
+ struct console *c;
+
++ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
++ if (in_irq() || in_nmi())
++ return;
++ }
++
+ /*
+ * console_unblank can no longer be called in interrupt context unless
+ * oops_in_progress is set to 1..
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 261ee21e62db..e27549ebd299 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -129,7 +129,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !__fatal_signal_pending(task)) {
@@ -18246,10 +19430,11 @@ diff -Nur linux-4.1.26.orig/kernel/ptrace.c linux-4.1.26/kernel/ptrace.c
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);
-diff -Nur linux-4.1.26.orig/kernel/rcu/rcutorture.c linux-4.1.26/kernel/rcu/rcutorture.c
---- linux-4.1.26.orig/kernel/rcu/rcutorture.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/rcu/rcutorture.c 2016-06-19 15:30:58.699297812 +0200
-@@ -389,6 +389,7 @@
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index 8dbe27611ec3..7b6170a46409 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -389,6 +389,7 @@ static struct rcu_torture_ops rcu_ops = {
.name = "rcu"
};
@@ -18257,7 +19442,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/rcutorture.c linux-4.1.26/kernel/rcu/rcut
/*
* Definitions for rcu_bh torture testing.
*/
-@@ -428,6 +429,12 @@
+@@ -428,6 +429,12 @@ static struct rcu_torture_ops rcu_bh_ops = {
.name = "rcu_bh"
};
@@ -18270,9 +19455,10 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/rcutorture.c linux-4.1.26/kernel/rcu/rcut
/*
* Don't even think about trying any of these in real life!!!
* The names includes "busted", and they really means it!
-diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
---- linux-4.1.26.orig/kernel/rcu/tree.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/rcu/tree.c 2016-06-19 15:30:58.699297812 +0200
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 8cf7304b2867..965df22d96ad 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
@@ -56,6 +56,11 @@
#include <linux/random.h>
#include <linux/ftrace_event.h>
@@ -18285,7 +19471,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
#include "tree.h"
#include "rcu.h"
-@@ -220,6 +225,19 @@
+@@ -220,6 +225,19 @@ void rcu_sched_qs(void)
}
}
@@ -18305,7 +19491,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
void rcu_bh_qs(void)
{
if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
-@@ -229,6 +247,7 @@
+@@ -229,6 +247,7 @@ void rcu_bh_qs(void)
__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
}
}
@@ -18313,7 +19499,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
-@@ -404,6 +423,7 @@
+@@ -404,6 +423,7 @@ unsigned long rcu_batches_completed_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
@@ -18321,7 +19507,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/*
* Return the number of RCU BH batches completed thus far for debug & stats.
*/
-@@ -431,6 +451,13 @@
+@@ -431,6 +451,13 @@ void rcu_bh_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
@@ -18335,7 +19521,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/*
* Force a quiescent state for RCU-sched.
*/
-@@ -1545,7 +1572,7 @@
+@@ -1545,7 +1572,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
!ACCESS_ONCE(rsp->gp_flags) ||
!rsp->gp_kthread)
return;
@@ -18344,7 +19530,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
}
/*
-@@ -1986,7 +2013,7 @@
+@@ -1986,7 +2013,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
ACCESS_ONCE(rsp->gpnum),
TPS("reqwait"));
rsp->gp_state = RCU_GP_WAIT_GPS;
@@ -18353,7 +19539,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
ACCESS_ONCE(rsp->gp_flags) &
RCU_GP_FLAG_INIT);
/* Locking provides needed memory barrier. */
-@@ -2015,7 +2042,7 @@
+@@ -2015,7 +2042,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
ACCESS_ONCE(rsp->gpnum),
TPS("fqswait"));
rsp->gp_state = RCU_GP_WAIT_FQS;
@@ -18362,7 +19548,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
((gf = ACCESS_ONCE(rsp->gp_flags)) &
RCU_GP_FLAG_FQS) ||
(!ACCESS_ONCE(rnp->qsmask) &&
-@@ -2860,18 +2887,17 @@
+@@ -2860,18 +2887,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
/*
* Do RCU core processing for the current CPU.
*/
@@ -18383,7 +19569,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
-@@ -2883,18 +2909,105 @@
+@@ -2883,18 +2909,105 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
return;
@@ -18495,7 +19681,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -3040,6 +3153,7 @@
+@@ -3040,6 +3153,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -18503,7 +19689,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
-@@ -3048,6 +3162,7 @@
+@@ -3048,6 +3162,7 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -18511,7 +19697,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/*
* Queue an RCU callback for lazy invocation after a grace period.
-@@ -3139,6 +3254,7 @@
+@@ -3139,6 +3254,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -18519,7 +19705,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
-@@ -3165,6 +3281,7 @@
+@@ -3165,6 +3281,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -18527,7 +19713,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/**
* get_state_synchronize_rcu - Snapshot current RCU state
-@@ -3677,6 +3794,7 @@
+@@ -3677,6 +3794,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
mutex_unlock(&rsp->barrier_mutex);
}
@@ -18535,7 +19721,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
-@@ -3685,6 +3803,7 @@
+@@ -3685,6 +3803,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -18543,7 +19729,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
-@@ -4021,7 +4140,7 @@
+@@ -4021,7 +4140,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
}
}
@@ -18552,7 +19738,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
rnp = rsp->level[rcu_num_lvls - 1];
for_each_possible_cpu(i) {
while (i > rnp->grphi)
-@@ -4120,7 +4239,6 @@
+@@ -4120,7 +4239,6 @@ void __init rcu_init(void)
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
rcu_init_one(&rcu_sched_state, &rcu_sched_data);
__rcu_init_preempt();
@@ -18560,9 +19746,10 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.c linux-4.1.26/kernel/rcu/tree.c
/*
* We don't need protection against CPU-hotplug here because
-diff -Nur linux-4.1.26.orig/kernel/rcu/tree.h linux-4.1.26/kernel/rcu/tree.h
---- linux-4.1.26.orig/kernel/rcu/tree.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/rcu/tree.h 2016-06-19 15:30:58.699297812 +0200
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index a69d3dab2ec4..8a9f0d3640de 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
@@ -27,6 +27,7 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
@@ -18571,7 +19758,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.h linux-4.1.26/kernel/rcu/tree.h
/*
* Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
-@@ -210,7 +211,7 @@
+@@ -210,7 +211,7 @@ struct rcu_node {
/* This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_NOCB_CPU
@@ -18580,7 +19767,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.h linux-4.1.26/kernel/rcu/tree.h
/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
int need_future_gp[2];
-@@ -349,7 +350,7 @@
+@@ -349,7 +350,7 @@ struct rcu_data {
atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
struct rcu_head **nocb_follower_tail;
@@ -18589,7 +19776,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.h linux-4.1.26/kernel/rcu/tree.h
struct task_struct *nocb_kthread;
int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
-@@ -438,7 +439,7 @@
+@@ -438,7 +439,7 @@ struct rcu_state {
unsigned long gpnum; /* Current gp number. */
unsigned long completed; /* # of last completed gp. */
struct task_struct *gp_kthread; /* Task for grace periods. */
@@ -18598,7 +19785,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.h linux-4.1.26/kernel/rcu/tree.h
short gp_flags; /* Commands for GP task. */
short gp_state; /* GP kthread sleep state. */
-@@ -529,12 +530,10 @@
+@@ -529,12 +530,10 @@ extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
@@ -18611,7 +19798,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.h linux-4.1.26/kernel/rcu/tree.h
#ifndef RCU_TREE_NONCORE
-@@ -553,10 +552,9 @@
+@@ -553,10 +552,9 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -18623,9 +19810,10 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree.h linux-4.1.26/kernel/rcu/tree.h
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
-diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tree_plugin.h
---- linux-4.1.26.orig/kernel/rcu/tree_plugin.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/rcu/tree_plugin.h 2016-06-19 15:30:58.699297812 +0200
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 8c0ec0f5a027..54da8f44d586 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
@@ -24,27 +24,20 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
@@ -18656,7 +19844,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
-@@ -291,7 +284,7 @@
+@@ -291,7 +284,7 @@ void rcu_read_unlock_special(struct task_struct *t)
}
/* Hardware IRQ handlers cannot block, complain if they get here. */
@@ -18665,7 +19853,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
lockdep_rcu_suspicious(__FILE__, __LINE__,
"rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
-@@ -496,15 +489,6 @@
+@@ -496,15 +489,6 @@ static void rcu_preempt_check_callbacks(void)
t->rcu_read_unlock_special.b.need_qs = true;
}
@@ -18681,7 +19869,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
/*
* Queue a preemptible-RCU callback for invocation after a grace period.
*/
-@@ -939,6 +923,19 @@
+@@ -939,6 +923,19 @@ void exit_rcu(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -18701,7 +19889,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
#ifdef CONFIG_RCU_BOOST
#include "../locking/rtmutex_common.h"
-@@ -970,16 +967,6 @@
+@@ -970,16 +967,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
@@ -18718,7 +19906,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1125,23 +1112,6 @@
+@@ -1125,23 +1112,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
}
/*
@@ -18742,7 +19930,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
*/
-@@ -1196,67 +1166,6 @@
+@@ -1196,67 +1166,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
return 0;
}
@@ -18810,7 +19998,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
-@@ -1286,26 +1195,12 @@
+@@ -1286,26 +1195,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
free_cpumask_var(cm);
}
@@ -18837,7 +20025,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
rcu_for_each_leaf_node(rcu_state_p, rnp)
(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}
-@@ -1328,11 +1223,6 @@
+@@ -1328,11 +1223,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
@@ -18849,7 +20037,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
static bool rcu_is_callbacks_kthread(void)
{
return false;
-@@ -1356,7 +1246,7 @@
+@@ -1356,7 +1246,7 @@ static void rcu_prepare_kthreads(int cpu)
#endif /* #else #ifdef CONFIG_RCU_BOOST */
@@ -18858,7 +20046,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
/*
* Check to see if any future RCU-related work will need to be done
-@@ -1374,7 +1264,9 @@
+@@ -1374,7 +1264,9 @@ int rcu_needs_cpu(unsigned long *delta_jiffies)
return rcu_cpu_has_callbacks(NULL);
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
@@ -18868,7 +20056,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
* after it.
-@@ -1472,6 +1364,8 @@
+@@ -1472,6 +1364,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
return cbs_ready;
}
@@ -18877,7 +20065,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
-@@ -1512,7 +1406,7 @@
+@@ -1512,7 +1406,7 @@ int rcu_needs_cpu(unsigned long *dj)
return 0;
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
@@ -18886,7 +20074,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
/*
* Prepare a CPU for idle from an RCU perspective. The first major task
* is to sense whether nohz mode has been enabled or disabled via sysfs.
-@@ -1859,7 +1753,7 @@
+@@ -1859,7 +1753,7 @@ early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
*/
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
@@ -18895,7 +20083,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
}
/*
-@@ -1877,8 +1771,8 @@
+@@ -1877,8 +1771,8 @@ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
static void rcu_init_one_nocb(struct rcu_node *rnp)
{
@@ -18906,7 +20094,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
}
#ifndef CONFIG_RCU_NOCB_CPU_ALL
-@@ -1903,7 +1797,7 @@
+@@ -1903,7 +1797,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
/* Prior smp_mb__after_atomic() orders against prior enqueue. */
ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
@@ -18915,7 +20103,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
}
}
-@@ -2116,7 +2010,7 @@
+@@ -2116,7 +2010,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
*/
trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
for (;;) {
@@ -18924,7 +20112,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
rnp->nocb_gp_wq[c & 0x1],
(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
if (likely(d))
-@@ -2144,7 +2038,7 @@
+@@ -2144,7 +2038,7 @@ wait_again:
/* Wait for callbacks to appear. */
if (!rcu_nocb_poll) {
trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
@@ -18933,7 +20121,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
!ACCESS_ONCE(my_rdp->nocb_leader_sleep));
/* Memory barrier handled by smp_mb() calls below and repoll. */
} else if (firsttime) {
-@@ -2219,7 +2113,7 @@
+@@ -2219,7 +2113,7 @@ wait_again:
* List was empty, wake up the follower.
* Memory barriers supplied by atomic_long_add().
*/
@@ -18942,7 +20130,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
}
}
-@@ -2240,7 +2134,7 @@
+@@ -2240,7 +2134,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
if (!rcu_nocb_poll) {
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
"FollowerSleep");
@@ -18951,7 +20139,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
ACCESS_ONCE(rdp->nocb_follower_head));
} else if (firsttime) {
/* Don't drown trace log with "Poll"! */
-@@ -2399,7 +2293,7 @@
+@@ -2399,7 +2293,7 @@ void __init rcu_init_nohz(void)
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
rdp->nocb_tail = &rdp->nocb_head;
@@ -18960,10 +20148,11 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/tree_plugin.h linux-4.1.26/kernel/rcu/tre
rdp->nocb_follower_tail = &rdp->nocb_follower_head;
}
-diff -Nur linux-4.1.26.orig/kernel/rcu/update.c linux-4.1.26/kernel/rcu/update.c
---- linux-4.1.26.orig/kernel/rcu/update.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/rcu/update.c 2016-06-19 15:30:58.699297812 +0200
-@@ -227,6 +227,7 @@
+diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
+index 1f133350da01..1718c4fe9bce 100644
+--- a/kernel/rcu/update.c
++++ b/kernel/rcu/update.c
+@@ -227,6 +227,7 @@ int rcu_read_lock_held(void)
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
@@ -18971,7 +20160,7 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/update.c linux-4.1.26/kernel/rcu/update.c
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
-@@ -253,6 +254,7 @@
+@@ -253,6 +254,7 @@ int rcu_read_lock_bh_held(void)
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
@@ -18979,10 +20168,11 @@ diff -Nur linux-4.1.26.orig/kernel/rcu/update.c linux-4.1.26/kernel/rcu/update.c
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-diff -Nur linux-4.1.26.orig/kernel/relay.c linux-4.1.26/kernel/relay.c
---- linux-4.1.26.orig/kernel/relay.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/relay.c 2016-06-19 15:30:58.699297812 +0200
-@@ -339,6 +339,10 @@
+diff --git a/kernel/relay.c b/kernel/relay.c
+index e9dbaeb8fd65..509f68fb91e8 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -339,6 +339,10 @@ static void wakeup_readers(unsigned long data)
{
struct rchan_buf *buf = (struct rchan_buf *)data;
wake_up_interruptible(&buf->read_wait);
@@ -18993,7 +20183,7 @@ diff -Nur linux-4.1.26.orig/kernel/relay.c linux-4.1.26/kernel/relay.c
}
/**
-@@ -356,6 +360,7 @@
+@@ -356,6 +360,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
init_waitqueue_head(&buf->read_wait);
kref_init(&buf->kref);
setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
@@ -19001,7 +20191,7 @@ diff -Nur linux-4.1.26.orig/kernel/relay.c linux-4.1.26/kernel/relay.c
} else
del_timer_sync(&buf->timer);
-@@ -739,15 +744,6 @@
+@@ -739,15 +744,6 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
else
buf->early_bytes += buf->chan->subbuf_size -
buf->padding[old_subbuf];
@@ -19017,10 +20207,24 @@ diff -Nur linux-4.1.26.orig/kernel/relay.c linux-4.1.26/kernel/relay.c
}
old = buf->data;
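In relay.c the patch adds four lines to wakeup_readers(), one line after the setup_timer() call in __relay_reset(), and drops a nine-line block from relay_switch_subbuf()'s hot path (per the -739,15 +744,6 header). The bodies fall outside the context shown, but the shape strongly suggests the usual RT fix: the buffer timer re-arms itself and polls for readers once per jiffy, so the subbuffer-switch path never touches the waitqueue lock. A sketch of that self-rearming scheme, with my_* names being illustrative only:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/wait.h>

struct my_rchan_buf {
	wait_queue_head_t read_wait;
	struct timer_list timer;
};

/* timer handler: wake readers here, then poll again next jiffy */
static void my_wakeup_readers(unsigned long data)
{
	struct my_rchan_buf *buf = (struct my_rchan_buf *)data;

	wake_up_interruptible(&buf->read_wait);
	mod_timer(&buf->timer, jiffies + 1);
}

static void my_buf_init(struct my_rchan_buf *buf)
{
	init_waitqueue_head(&buf->read_wait);
	setup_timer(&buf->timer, my_wakeup_readers, (unsigned long)buf);
	mod_timer(&buf->timer, jiffies + 1);	/* start polling */
}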
-diff -Nur linux-4.1.26.orig/kernel/sched/completion.c linux-4.1.26/kernel/sched/completion.c
---- linux-4.1.26.orig/kernel/sched/completion.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/sched/completion.c 2016-06-19 15:30:58.699297812 +0200
-@@ -30,10 +30,10 @@
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 46be87024875..3944d32a044d 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -13,7 +13,7 @@ endif
+
+ obj-y += core.o proc.o clock.o cputime.o
+ obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
+-obj-y += wait.o completion.o idle.o
++obj-y += wait.o wait-simple.o work-simple.o completion.o idle.o
+ obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+ obj-$(CONFIG_SCHEDSTATS) += stats.o
+diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
+index 8d0f35debf35..45ebcffd9feb 100644
+--- a/kernel/sched/completion.c
++++ b/kernel/sched/completion.c
+@@ -30,10 +30,10 @@ void complete(struct completion *x)
{
unsigned long flags;
@@ -19034,7 +20238,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/completion.c linux-4.1.26/kernel/sched/
}
EXPORT_SYMBOL(complete);
-@@ -50,10 +50,10 @@
+@@ -50,10 +50,10 @@ void complete_all(struct completion *x)
{
unsigned long flags;
@@ -19048,7 +20252,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/completion.c linux-4.1.26/kernel/sched/
}
EXPORT_SYMBOL(complete_all);
-@@ -62,20 +62,20 @@
+@@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state)
{
if (!x->done) {
@@ -19074,7 +20278,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/completion.c linux-4.1.26/kernel/sched/
if (!x->done)
return timeout;
}
-@@ -89,9 +89,9 @@
+@@ -89,9 +89,9 @@ __wait_for_common(struct completion *x,
{
might_sleep();
@@ -19086,7 +20290,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/completion.c linux-4.1.26/kernel/sched/
return timeout;
}
-@@ -277,12 +277,12 @@
+@@ -277,12 +277,12 @@ bool try_wait_for_completion(struct completion *x)
if (!READ_ONCE(x->done))
return 0;
@@ -19101,7 +20305,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/completion.c linux-4.1.26/kernel/sched/
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
-@@ -311,7 +311,7 @@
+@@ -311,7 +311,7 @@ bool completion_done(struct completion *x)
* after it's acquired the lock.
*/
smp_rmb();
@@ -19110,10 +20314,11 @@ diff -Nur linux-4.1.26.orig/kernel/sched/completion.c linux-4.1.26/kernel/sched/
return true;
}
EXPORT_SYMBOL(completion_done);
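The completion.c hunks land exactly on the lock/wake calls of complete(), complete_all() and the wait paths, which by all appearances is the RT conversion of completions onto a raw-lock simple waitqueue: the head's lock stays a true spinlock even on RT, so complete() remains legal from hard interrupt handlers. A minimal completion built directly on that idea (illustrative only; the patch itself layers this on the wait-simple.c code added later in this file):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_completion {
	unsigned int done;
	raw_spinlock_t lock;		/* raw: never sleeps, even on RT */
	struct list_head waiters;
};

struct my_waiter {
	struct task_struct *task;
	struct list_head node;
};

static void my_complete(struct my_completion *x)
{
	struct my_waiter *w;
	unsigned long flags;

	raw_spin_lock_irqsave(&x->lock, flags);
	x->done++;
	if (!list_empty(&x->waiters)) {
		w = list_first_entry(&x->waiters, struct my_waiter, node);
		wake_up_process(w->task);	/* non-sleeping wakeup */
	}
	raw_spin_unlock_irqrestore(&x->lock, flags);
}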
-diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
---- linux-4.1.26.orig/kernel/sched/core.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/sched/core.c 2016-06-19 15:30:58.703297966 +0200
-@@ -282,7 +282,11 @@
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 6cb5f00696f5..0d3a40b24304 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -282,7 +282,11 @@ late_initcall(sched_init_debug);
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
@@ -19125,7 +20330,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
/*
* period over which we average the RT time consumption, measured
-@@ -461,6 +465,7 @@
+@@ -461,6 +465,7 @@ static void init_rq_hrtick(struct rq *rq)
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
@@ -19133,7 +20338,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
-@@ -541,6 +546,52 @@
+@@ -541,6 +546,52 @@ static bool set_nr_if_polling(struct task_struct *p)
#endif
#endif
@@ -19186,7 +20391,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
/*
* resched_curr - mark rq's current task 'to be rescheduled now'.
*
-@@ -572,6 +623,38 @@
+@@ -572,6 +623,38 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -19225,7 +20430,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -595,12 +678,14 @@
+@@ -595,12 +678,14 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(int pinned)
{
@@ -19242,7 +20447,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
rcu_read_lock();
for_each_domain(cpu, sd) {
-@@ -613,6 +698,8 @@
+@@ -613,6 +698,8 @@ int get_nohz_timer_target(int pinned)
}
unlock:
rcu_read_unlock();
@@ -19251,7 +20456,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
return cpu;
}
/*
-@@ -1164,6 +1251,18 @@
+@@ -1164,6 +1251,18 @@ struct migration_arg {
static int migration_cpu_stop(void *data);
@@ -19270,7 +20475,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1208,7 +1307,7 @@
+@@ -1208,7 +1307,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -19279,7 +20484,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
return 0;
cpu_relax();
}
-@@ -1223,7 +1322,8 @@
+@@ -1223,7 +1322,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
@@ -19289,7 +20494,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &flags);
-@@ -1449,10 +1549,6 @@
+@@ -1449,10 +1549,6 @@ static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -19300,7 +20505,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
}
/*
-@@ -1462,9 +1558,9 @@
+@@ -1462,9 +1558,9 @@ static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
check_preempt_curr(rq, p, wake_flags);
@@ -19312,7 +20517,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
-@@ -1666,8 +1762,29 @@
+@@ -1666,8 +1762,29 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -19343,7 +20548,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
-@@ -1710,42 +1827,6 @@
+@@ -1732,42 +1849,6 @@ out:
}
/**
@@ -19386,7 +20591,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -1759,11 +1840,23 @@
+@@ -1781,11 +1862,23 @@ out:
*/
int wake_up_process(struct task_struct *p)
{
@@ -19411,7 +20616,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
-@@ -1959,6 +2052,9 @@
+@@ -1981,6 +2074,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -19421,7 +20626,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -2094,7 +2190,7 @@
+@@ -2116,7 +2212,7 @@ void wake_up_new_task(struct task_struct *p)
rq = __task_rq_lock(p);
activate_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -19430,7 +20635,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
-@@ -2231,8 +2327,12 @@
+@@ -2253,8 +2349,12 @@ static struct rq *finish_task_switch(struct task_struct *prev)
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -19444,7 +20649,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -2543,16 +2643,6 @@
+@@ -2565,16 +2665,6 @@ u64 scheduler_tick_max_deferment(void)
}
#endif
@@ -19461,7 +20666,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_PREEMPT_TRACER))
-@@ -2574,7 +2664,7 @@
+@@ -2596,7 +2686,7 @@ void preempt_count_add(int val)
PREEMPT_MASK - 10);
#endif
if (preempt_count() == val) {
@@ -19470,7 +20675,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
#ifdef CONFIG_DEBUG_PREEMPT
current->preempt_disable_ip = ip;
#endif
-@@ -2601,7 +2691,7 @@
+@@ -2623,7 +2713,7 @@ void preempt_count_sub(int val)
#endif
if (preempt_count() == val)
@@ -19479,7 +20684,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);
-@@ -2657,6 +2747,133 @@
+@@ -2679,6 +2769,133 @@ static inline void schedule_debug(struct task_struct *prev)
schedstat_inc(this_rq(), sched_count);
}
@@ -19613,7 +20818,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
/*
* Pick up the highest-prio task:
*/
-@@ -2763,6 +2980,8 @@
+@@ -2785,6 +3002,8 @@ static void __sched __schedule(void)
smp_mb__before_spinlock();
raw_spin_lock_irq(&rq->lock);
@@ -19622,7 +20827,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
rq->clock_skip_update <<= 1; /* promote REQ to ACT */
switch_count = &prev->nivcsw;
-@@ -2772,19 +2991,6 @@
+@@ -2794,19 +3013,6 @@ static void __sched __schedule(void)
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
@@ -19642,7 +20847,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
}
switch_count = &prev->nvcsw;
}
-@@ -2794,6 +3000,7 @@
+@@ -2816,6 +3022,7 @@ static void __sched __schedule(void)
next = pick_next_task(rq, prev);
clear_tsk_need_resched(prev);
@@ -19650,7 +20855,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
clear_preempt_need_resched();
rq->clock_skip_update = 0;
-@@ -2814,8 +3021,19 @@
+@@ -2836,8 +3043,19 @@ static void __sched __schedule(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -19671,7 +20876,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -2824,6 +3042,12 @@
+@@ -2846,6 +3064,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
blk_schedule_flush_plug(tsk);
}
@@ -19684,7 +20889,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -2832,6 +3056,7 @@
+@@ -2854,6 +3078,7 @@ asmlinkage __visible void __sched schedule(void)
do {
__schedule();
} while (need_resched());
@@ -19692,7 +20897,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
}
EXPORT_SYMBOL(schedule);
-@@ -2881,6 +3106,30 @@
+@@ -2903,6 +3128,30 @@ static void __sched notrace preempt_schedule_common(void)
} while (need_resched());
}
@@ -19713,7 +20918,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
+
+#else
+
-+static int preemptible_lazy(void)
++static inline int preemptible_lazy(void)
+{
+ return 1;
+}
@@ -19723,7 +20928,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -2895,6 +3144,8 @@
+@@ -2917,6 +3166,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
*/
if (likely(!preemptible()))
return;
@@ -19732,7 +20937,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
preempt_schedule_common();
}
-@@ -2922,6 +3173,8 @@
+@@ -2944,6 +3195,8 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
if (likely(!preemptible()))
return;
@@ -19741,7 +20946,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
do {
__preempt_count_add(PREEMPT_ACTIVE);
-@@ -2931,7 +3184,16 @@
+@@ -2953,7 +3206,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
* an infinite recursion.
*/
prev_ctx = exception_enter();
@@ -19758,7 +20963,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
exception_exit(prev_ctx);
__preempt_count_sub(PREEMPT_ACTIVE);
-@@ -4268,6 +4530,7 @@
+@@ -4290,6 +4552,7 @@ int __cond_resched_lock(spinlock_t *lock)
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -19766,7 +20971,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4281,6 +4544,7 @@
+@@ -4303,6 +4566,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
@@ -19774,7 +20979,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
/**
* yield - yield the current processor to other threads.
-@@ -4635,7 +4899,9 @@
+@@ -4659,7 +4923,9 @@ void init_idle(struct task_struct *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -19785,7 +20990,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -4755,11 +5021,91 @@
+@@ -4779,11 +5045,91 @@ static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -19880,7 +21085,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
}
/*
-@@ -4805,7 +5151,7 @@
+@@ -4829,7 +5175,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
@@ -19889,7 +21094,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -4945,6 +5291,8 @@
+@@ -4969,6 +5315,8 @@ static int migration_cpu_stop(void *data)
#ifdef CONFIG_HOTPLUG_CPU
@@ -19898,7 +21103,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -4959,7 +5307,11 @@
+@@ -4983,7 +5331,11 @@ void idle_task_exit(void)
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -19911,7 +21116,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
}
/*
-@@ -5302,6 +5654,10 @@
+@@ -5326,6 +5678,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_DEAD:
calc_load_migrate(rq);
@@ -19922,7 +21127,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
break;
#endif
}
-@@ -7281,7 +7637,8 @@
+@@ -7305,7 +7661,8 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
@@ -19932,10 +21137,11 @@ diff -Nur linux-4.1.26.orig/kernel/sched/core.c linux-4.1.26/kernel/sched/core.c
return (nested == preempt_offset);
}
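Two themes dominate the core.c hunks: migration control (the do_set_cpus_allowed() region grows by 80 lines per its hunk header, the usual home of migrate_disable()/migrate_enable(), which pin a task to its CPU without disabling preemption) and lazy preemption, where a SCHED_OTHER wakeup only sets a lazy resched flag instead of forcing an immediate switch. The preemptible_lazy() helper named above gates each preempt_schedule() path; its CONFIG_PREEMPT_LAZY body lies outside the shown context, but a faithful shape would be (the thread_info field name is an assumption taken from RT convention):

#include <linux/sched.h>

static inline int preemptible_lazy_sketch(void)
{
	if (test_thread_flag(TIF_NEED_RESCHED))
		return 1;	/* a hard resched request always preempts */
	if (current_thread_info()->preempt_lazy_count)	/* patch-added field */
		return 0;	/* inside a lazy section: defer */
	return 1;
}

The !CONFIG_PREEMPT_LAZY stub visible in the hunk simply returns 1, i.e. behaves exactly like mainline.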
-diff -Nur linux-4.1.26.orig/kernel/sched/cputime.c linux-4.1.26/kernel/sched/cputime.c
---- linux-4.1.26.orig/kernel/sched/cputime.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/sched/cputime.c 2016-06-19 15:30:58.703297966 +0200
-@@ -675,37 +675,45 @@
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 87b8576cbd50..2ee44eb30f2b 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -675,37 +675,45 @@ static void __vtime_account_system(struct task_struct *tsk)
void vtime_account_system(struct task_struct *tsk)
{
@@ -19989,7 +21195,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/cputime.c linux-4.1.26/kernel/sched/cpu
}
void vtime_guest_enter(struct task_struct *tsk)
-@@ -717,19 +725,23 @@
+@@ -717,19 +725,23 @@ void vtime_guest_enter(struct task_struct *tsk)
* synchronization against the reader (task_gtime())
* that can thus safely catch up with a tickless delta.
*/
@@ -20017,7 +21223,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/cputime.c linux-4.1.26/kernel/sched/cpu
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
-@@ -742,24 +754,30 @@
+@@ -742,24 +754,30 @@ void vtime_account_idle(struct task_struct *tsk)
void arch_vtime_task_switch(struct task_struct *prev)
{
@@ -20054,7 +21260,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/cputime.c linux-4.1.26/kernel/sched/cpu
}
cputime_t task_gtime(struct task_struct *t)
-@@ -768,13 +786,13 @@
+@@ -768,13 +786,13 @@ cputime_t task_gtime(struct task_struct *t)
cputime_t gtime;
do {
@@ -20070,7 +21276,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/cputime.c linux-4.1.26/kernel/sched/cpu
return gtime;
}
-@@ -797,7 +815,7 @@
+@@ -797,7 +815,7 @@ fetch_task_cputime(struct task_struct *t,
*udelta = 0;
*sdelta = 0;
@@ -20079,7 +21285,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/cputime.c linux-4.1.26/kernel/sched/cpu
if (u_dst)
*u_dst = *u_src;
-@@ -821,7 +839,7 @@
+@@ -821,7 +839,7 @@ fetch_task_cputime(struct task_struct *t,
if (t->vtime_snap_whence == VTIME_SYS)
*sdelta = delta;
}
@@ -20088,10 +21294,11 @@ diff -Nur linux-4.1.26.orig/kernel/sched/cputime.c linux-4.1.26/kernel/sched/cpu
}
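The cputime.c hunks (vtime_account_system(), vtime_guest_enter()/exit(), arch_vtime_task_switch(), task_gtime(), fetch_task_cputime()) replace the old seqlock on the task's vtime state with a raw spinlock plus a bare seqcount — the RT tree calls the latter vtime_seqcount. Separating the writer lock from the sequence counter lets the write side nest under raw locks on RT while readers keep the usual lockless retry loop. The generic pattern:

#include <linux/seqlock.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_lock);
static seqcount_t my_seq = SEQCNT_ZERO(my_seq);
static u64 my_ns;

static void my_write(u64 delta)
{
	raw_spin_lock(&my_lock);	/* serialize writers */
	write_seqcount_begin(&my_seq);
	my_ns += delta;
	write_seqcount_end(&my_seq);
	raw_spin_unlock(&my_lock);
}

static u64 my_read(void)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&my_seq);
		val = my_ns;
	} while (read_seqcount_retry(&my_seq, seq));

	return val;
}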
-diff -Nur linux-4.1.26.orig/kernel/sched/deadline.c linux-4.1.26/kernel/sched/deadline.c
---- linux-4.1.26.orig/kernel/sched/deadline.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/sched/deadline.c 2016-06-19 15:30:58.703297966 +0200
-@@ -637,6 +637,7 @@
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 5e95145088fd..0c261c5114e0 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -637,6 +637,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = dl_task_timer;
@@ -20099,10 +21306,11 @@ diff -Nur linux-4.1.26.orig/kernel/sched/deadline.c linux-4.1.26/kernel/sched/de
}
static
-diff -Nur linux-4.1.26.orig/kernel/sched/debug.c linux-4.1.26/kernel/sched/debug.c
---- linux-4.1.26.orig/kernel/sched/debug.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/sched/debug.c 2016-06-19 15:30:58.703297966 +0200
-@@ -260,6 +260,9 @@
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index a245c1fc6f0a..34b00001b00a 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -260,6 +260,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
P(rt_throttled);
PN(rt_time);
PN(rt_runtime);
@@ -20112,7 +21320,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/debug.c linux-4.1.26/kernel/sched/debug
#undef PN
#undef P
-@@ -648,6 +651,10 @@
+@@ -648,6 +651,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
#endif
P(policy);
P(prio);
@@ -20123,10 +21331,11 @@ diff -Nur linux-4.1.26.orig/kernel/sched/debug.c linux-4.1.26/kernel/sched/debug
#undef PN
#undef __PN
#undef P
-diff -Nur linux-4.1.26.orig/kernel/sched/fair.c linux-4.1.26/kernel/sched/fair.c
---- linux-4.1.26.orig/kernel/sched/fair.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/sched/fair.c 2016-06-19 15:30:58.703297966 +0200
-@@ -3201,7 +3201,7 @@
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 77690b653ca9..7aae8d27611e 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3201,7 +3201,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -20135,7 +21344,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/fair.c linux-4.1.26/kernel/sched/fair.c
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -3225,7 +3225,7 @@
+@@ -3225,7 +3225,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return;
if (delta > ideal_runtime)
@@ -20144,7 +21353,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/fair.c linux-4.1.26/kernel/sched/fair.c
}
static void
-@@ -3366,7 +3366,7 @@
+@@ -3366,7 +3366,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
* validating it and just reschedule.
*/
if (queued) {
@@ -20153,7 +21362,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/fair.c linux-4.1.26/kernel/sched/fair.c
return;
}
/*
-@@ -3557,7 +3557,7 @@
+@@ -3557,7 +3557,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -20162,7 +21371,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/fair.c linux-4.1.26/kernel/sched/fair.c
}
static __always_inline
-@@ -4180,7 +4180,7 @@
+@@ -4180,7 +4180,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
if (delta < 0) {
if (rq->curr == p)
@@ -20171,7 +21380,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/fair.c linux-4.1.26/kernel/sched/fair.c
return;
}
hrtick_start(rq, delta);
-@@ -5076,7 +5076,7 @@
+@@ -5076,7 +5076,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
preempt:
@@ -20180,7 +21389,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/fair.c linux-4.1.26/kernel/sched/fair.c
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7869,7 +7869,7 @@
+@@ -7869,7 +7869,7 @@ static void task_fork_fair(struct task_struct *p)
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -20189,7 +21398,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/fair.c linux-4.1.26/kernel/sched/fair.c
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -7894,7 +7894,7 @@
+@@ -7894,7 +7894,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -20198,10 +21407,11 @@ diff -Nur linux-4.1.26.orig/kernel/sched/fair.c linux-4.1.26/kernel/sched/fair.c
} else
check_preempt_curr(rq, p, 0);
}
-diff -Nur linux-4.1.26.orig/kernel/sched/features.h linux-4.1.26/kernel/sched/features.h
---- linux-4.1.26.orig/kernel/sched/features.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/sched/features.h 2016-06-19 15:30:58.707298121 +0200
-@@ -50,11 +50,19 @@
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index 91e33cd485f6..0ea4e37751d7 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -50,11 +50,19 @@ SCHED_FEAT(LB_BIAS, true)
*/
SCHED_FEAT(NONTASK_CAPACITY, true)
@@ -20221,22 +21431,11 @@ diff -Nur linux-4.1.26.orig/kernel/sched/features.h linux-4.1.26/kernel/sched/fe
#ifdef HAVE_RT_PUSH_IPI
/*
-diff -Nur linux-4.1.26.orig/kernel/sched/Makefile linux-4.1.26/kernel/sched/Makefile
---- linux-4.1.26.orig/kernel/sched/Makefile 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/sched/Makefile 2016-06-19 15:30:58.699297812 +0200
-@@ -13,7 +13,7 @@
-
- obj-y += core.o proc.o clock.o cputime.o
- obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
--obj-y += wait.o completion.o idle.o
-+obj-y += wait.o wait-simple.o work-simple.o completion.o idle.o
- obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
- obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
- obj-$(CONFIG_SCHEDSTATS) += stats.o
-diff -Nur linux-4.1.26.orig/kernel/sched/rt.c linux-4.1.26/kernel/sched/rt.c
---- linux-4.1.26.orig/kernel/sched/rt.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/sched/rt.c 2016-06-19 15:30:58.707298121 +0200
-@@ -44,6 +44,7 @@
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 575da76a3874..637aa208a58d 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -44,6 +44,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -20244,7 +21443,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/rt.c linux-4.1.26/kernel/sched/rt.c
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
-@@ -89,6 +90,7 @@
+@@ -89,6 +90,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
rt_rq->push_cpu = nr_cpu_ids;
raw_spin_lock_init(&rt_rq->push_lock);
init_irq_work(&rt_rq->push_work, push_irq_work_func);
@@ -20252,10 +21451,11 @@ diff -Nur linux-4.1.26.orig/kernel/sched/rt.c linux-4.1.26/kernel/sched/rt.c
#endif
#endif /* CONFIG_SMP */
/* We start is dequeued state, because no RT tasks are queued */
-diff -Nur linux-4.1.26.orig/kernel/sched/sched.h linux-4.1.26/kernel/sched/sched.h
---- linux-4.1.26.orig/kernel/sched/sched.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/sched/sched.h 2016-06-19 15:30:58.707298121 +0200
-@@ -1093,6 +1093,7 @@
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index aa1f059de4f7..1bdd1e5f056d 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1093,6 +1093,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
@@ -20263,7 +21463,7 @@ diff -Nur linux-4.1.26.orig/kernel/sched/sched.h linux-4.1.26/kernel/sched/sched
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
-@@ -1290,6 +1291,15 @@
+@@ -1290,6 +1291,15 @@ extern void init_sched_dl_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -20279,9 +21479,11 @@ diff -Nur linux-4.1.26.orig/kernel/sched/sched.h linux-4.1.26/kernel/sched/sched
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
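sched.h gains a new wake flag next to WF_MIGRATED and a nine-line block beside the resched_curr() declaration which, judging by the resched_curr_lazy() calls peppered through fair.c above, declares the lazy resched API. For kernels built without lazy preemption the natural fallback — an assumption here, mirroring the usual RT arrangement — is a plain alias:

#ifndef CONFIG_PREEMPT_LAZY
static inline void resched_curr_lazy(struct rq *rq)
{
	resched_curr(rq);	/* no lazy mode: reschedule immediately */
}
#endif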
-diff -Nur linux-4.1.26.orig/kernel/sched/wait-simple.c linux-4.1.26/kernel/sched/wait-simple.c
---- linux-4.1.26.orig/kernel/sched/wait-simple.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/kernel/sched/wait-simple.c 2016-06-19 15:30:58.707298121 +0200
+diff --git a/kernel/sched/wait-simple.c b/kernel/sched/wait-simple.c
+new file mode 100644
+index 000000000000..7dfa86d1f654
+--- /dev/null
++++ b/kernel/sched/wait-simple.c
@@ -0,0 +1,115 @@
+/*
+ * Simple waitqueues without fancy flags and callbacks
@@ -20398,9 +21600,11 @@ diff -Nur linux-4.1.26.orig/kernel/sched/wait-simple.c linux-4.1.26/kernel/sched
+ return woken;
+}
+EXPORT_SYMBOL(__swait_wake);
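Only the first and last lines of the new 115-line wait-simple.c fall inside the hunks shown, but the visible pieces (the "without fancy flags and callbacks" banner, a woken counter returned from __swait_wake(), the raw-lock discipline seen in completion.c) pin down the design: a bare list of waiting tasks under a raw spinlock, with none of the per-entry flags or wake-function callbacks of regular waitqueues. Reconstructed as a sketch with assumed names:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_swait_head {
	raw_spinlock_t lock;
	struct list_head list;
};

struct my_swaiter {
	struct task_struct *task;
	struct list_head node;
};

/* wake up to @num waiters matching @state; returns how many woke */
static unsigned int my_swait_wake(struct my_swait_head *h,
				  unsigned int state, unsigned int num)
{
	struct my_swaiter *curr, *next;
	unsigned int woken = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry_safe(curr, next, &h->list, node) {
		if (wake_up_state(curr->task, state)) {
			list_del_init(&curr->node);
			if (++woken == num)
				break;
		}
	}
	raw_spin_unlock_irqrestore(&h->lock, flags);
	return woken;
}

Because the head lock is raw and the walk never sleeps, this wake path stays legal from hard interrupt context on RT — the whole point of the facility.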
-diff -Nur linux-4.1.26.orig/kernel/sched/work-simple.c linux-4.1.26/kernel/sched/work-simple.c
---- linux-4.1.26.orig/kernel/sched/work-simple.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/kernel/sched/work-simple.c 2016-06-19 15:30:58.707298121 +0200
+diff --git a/kernel/sched/work-simple.c b/kernel/sched/work-simple.c
+new file mode 100644
+index 000000000000..e57a0522573f
+--- /dev/null
++++ b/kernel/sched/work-simple.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
@@ -20575,9 +21779,10 @@ diff -Nur linux-4.1.26.orig/kernel/sched/work-simple.c linux-4.1.26/kernel/sched
+ mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
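work-simple.c contributes 173 lines implementing "swork": a kernel thread that executes queued items in preemptible process context, so code running with interrupts off can hand off anything that needs sleeping locks. Only the copyright banner, a worker_mutex and swork_put() are visible here; the self-contained sketch below shows the general mechanism with made-up names:

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_work {
	struct list_head node;
	void (*func)(struct my_work *);
};

static LIST_HEAD(my_queue);
static DEFINE_RAW_SPINLOCK(my_queue_lock);
static struct task_struct *my_worker;	/* kthread_run(my_worker_fn, ...) */

static int my_worker_fn(void *unused)
{
	for (;;) {
		struct my_work *w = NULL;

		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_lock_irq(&my_queue_lock);
		if (!list_empty(&my_queue)) {
			w = list_first_entry(&my_queue, struct my_work, node);
			list_del_init(&w->node);
		}
		raw_spin_unlock_irq(&my_queue_lock);

		if (w) {
			__set_current_state(TASK_RUNNING);
			w->func(w);	/* preemptible process context */
		} else if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else {
			schedule();
		}
	}
	return 0;
}

/* callable from any context: raw lock plus a non-sleeping wakeup */
static void my_work_queue(struct my_work *w)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&my_queue_lock, flags);
	list_add_tail(&w->node, &my_queue);
	raw_spin_unlock_irqrestore(&my_queue_lock, flags);
	wake_up_process(my_worker);
}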
-diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
---- linux-4.1.26.orig/kernel/signal.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/signal.c 2016-06-19 15:30:58.707298121 +0200
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 0206be728dac..1336e4c016ba 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/init.h>
@@ -20586,7 +21791,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
-@@ -352,13 +353,45 @@
+@@ -352,13 +353,45 @@ static bool task_participate_group_stop(struct task_struct *task)
return false;
}
@@ -20633,7 +21838,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
{
struct sigqueue *q = NULL;
struct user_struct *user;
-@@ -375,7 +408,10 @@
+@@ -375,7 +408,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
if (override_rlimit ||
atomic_read(&user->sigpending) <=
task_rlimit(t, RLIMIT_SIGPENDING)) {
@@ -20645,7 +21850,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
} else {
print_dropped_signal(sig);
}
-@@ -392,6 +428,13 @@
+@@ -392,6 +428,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
return q;
}
@@ -20659,7 +21864,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
-@@ -401,6 +444,21 @@
+@@ -401,6 +444,21 @@ static void __sigqueue_free(struct sigqueue *q)
kmem_cache_free(sigqueue_cachep, q);
}
@@ -20681,7 +21886,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
-@@ -414,6 +472,21 @@
+@@ -414,6 +472,21 @@ void flush_sigqueue(struct sigpending *queue)
}
/*
@@ -20703,7 +21908,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
* Flush all pending signals for a task.
*/
void __flush_signals(struct task_struct *t)
-@@ -565,7 +638,7 @@
+@@ -565,7 +638,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
@@ -20712,7 +21917,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
} else {
/*
* Ok, it wasn't in the queue. This must be
-@@ -611,6 +684,8 @@
+@@ -611,6 +684,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
int signr;
@@ -20721,7 +21926,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
-@@ -1207,8 +1282,8 @@
+@@ -1207,8 +1282,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
@@ -20732,7 +21937,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
{
unsigned long int flags;
int ret, blocked, ignored;
-@@ -1233,6 +1308,39 @@
+@@ -1233,6 +1308,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
return ret;
}
@@ -20772,7 +21977,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
/*
* Nuke all other threads in the group.
*/
-@@ -1267,12 +1375,12 @@
+@@ -1267,12 +1375,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
* Disable interrupts early to avoid deadlocks.
* See rcu_read_unlock() comment header for details.
*/
@@ -20787,7 +21992,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
break;
}
/*
-@@ -1293,7 +1401,7 @@
+@@ -1293,7 +1401,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
}
spin_unlock(&sighand->siglock);
rcu_read_unlock();
@@ -20796,7 +22001,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
}
return sighand;
-@@ -1536,7 +1644,8 @@
+@@ -1536,7 +1644,8 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
@@ -20806,7 +22011,7 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
if (q)
q->flags |= SIGQUEUE_PREALLOC;
-@@ -1897,15 +2006,7 @@
+@@ -1897,15 +2006,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
@@ -20822,9 +22027,10 @@ diff -Nur linux-4.1.26.orig/kernel/signal.c linux-4.1.26/kernel/signal.c
freezable_schedule();
} else {
/*
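signal.c grows a 32-line block after task_participate_group_stop(), widens __sigqueue_alloc(), and adds 15-line helpers beside __sigqueue_free() and flush_sigqueue(). The new bodies lie outside the shown context, but this layout matches the standard RT recipe: let a task pre-allocate and cache the sigqueue entry it will need, so the atomic send path can skip the slab allocator. A one-slot lock-free cache of that style — the cache slot on task_struct is a patch-added assumption, so this standalone version carries its own struct:

#include <linux/atomic.h>
#include <linux/signal.h>

struct my_sig_cache {
	struct sigqueue *slot;	/* one pre-allocated entry, or NULL */
};

static struct sigqueue *my_get_cached(struct my_sig_cache *c)
{
	/* atomically take the entry, leaving NULL behind */
	return xchg(&c->slot, (struct sigqueue *)NULL);
}

static bool my_put_cached(struct my_sig_cache *c, struct sigqueue *q)
{
	/* install only if the slot is empty; caller frees q on failure */
	return cmpxchg(&c->slot, (struct sigqueue *)NULL, q) == NULL;
}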
-diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
---- linux-4.1.26.orig/kernel/softirq.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/softirq.c 2016-06-19 15:30:58.707298121 +0200
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 479e4436f787..cb9c1d5dee10 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
@@ -21,10 +21,12 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -20838,7 +22044,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
#include <linux/irq.h>
#define CREATE_TRACE_POINTS
-@@ -56,12 +58,108 @@
+@@ -56,12 +58,108 @@ EXPORT_SYMBOL(irq_stat);
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
@@ -20947,7 +22153,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
/*
* we cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst case 1/HZ latency
-@@ -77,6 +175,79 @@
+@@ -77,6 +175,79 @@ static void wakeup_softirqd(void)
wake_up_process(tsk);
}
@@ -21027,7 +22233,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
/*
* preempt_count and SOFTIRQ_OFFSET usage:
* - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
-@@ -116,9 +287,9 @@
+@@ -116,9 +287,9 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
@@ -21039,7 +22245,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
-@@ -232,10 +403,8 @@
+@@ -232,10 +403,8 @@ asmlinkage __visible void __do_softirq(void)
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
@@ -21050,7 +22256,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
/*
* Mask out PF_MEMALLOC s current task context is borrowed for the
-@@ -254,36 +423,7 @@
+@@ -254,36 +423,7 @@ restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
@@ -21088,7 +22294,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
pending = local_softirq_pending();
if (pending) {
-@@ -320,6 +460,310 @@
+@@ -320,6 +460,310 @@ asmlinkage __visible void do_softirq(void)
}
/*
@@ -21399,7 +22605,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
* Enter an interrupt context.
*/
void irq_enter(void)
-@@ -330,9 +774,9 @@
+@@ -330,9 +774,9 @@ void irq_enter(void)
* Prevent raise_softirq from needlessly waking up ksoftirqd
* here, as softirq will be serviced on return from interrupt.
*/
@@ -21411,7 +22617,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
}
__irq_enter();
-@@ -340,6 +784,7 @@
+@@ -340,6 +784,7 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
@@ -21419,7 +22625,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
-@@ -359,6 +804,18 @@
+@@ -359,6 +804,18 @@ static inline void invoke_softirq(void)
} else {
wakeup_softirqd();
}
@@ -21438,7 +22644,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
}
static inline void tick_irq_exit(void)
-@@ -395,26 +852,6 @@
+@@ -395,26 +852,6 @@ void irq_exit(void)
trace_hardirq_exit(); /* must be last! */
}
@@ -21465,7 +22671,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
void raise_softirq(unsigned int nr)
{
unsigned long flags;
-@@ -424,12 +861,6 @@
+@@ -424,12 +861,6 @@ void raise_softirq(unsigned int nr)
local_irq_restore(flags);
}
@@ -21478,7 +22684,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
softirq_vec[nr].action = action;
-@@ -446,15 +877,45 @@
+@@ -446,15 +877,45 @@ struct tasklet_head {
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
@@ -21528,7 +22734,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);
-@@ -464,10 +925,7 @@
+@@ -464,10 +925,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
unsigned long flags;
local_irq_save(flags);
@@ -21540,7 +22746,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
-@@ -476,82 +934,122 @@
+@@ -476,82 +934,122 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
BUG_ON(!irqs_disabled());
@@ -21712,7 +22918,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
}
void tasklet_init(struct tasklet_struct *t,
-@@ -572,7 +1070,7 @@
+@@ -572,7 +1070,7 @@ void tasklet_kill(struct tasklet_struct *t)
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do {
@@ -21721,7 +22927,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
} while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
-@@ -646,25 +1144,26 @@
+@@ -646,25 +1144,26 @@ void __init softirq_init(void)
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
@@ -21763,7 +22969,7 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
}
#ifdef CONFIG_HOTPLUG_CPU
-@@ -746,16 +1245,31 @@
+@@ -746,16 +1245,31 @@ static struct notifier_block cpu_nfb = {
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,
@@ -21795,10 +23001,11 @@ diff -Nur linux-4.1.26.orig/kernel/softirq.c linux-4.1.26/kernel/softirq.c
return 0;
}
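softirq.c takes the largest rework in this file: a 96-line block of new per-softirq state after the softirq_vec[] table, a 73-line block after wakeup_softirqd(), and a 304-line block before irq_enter() (per the hunk headers), which together let softirqs be executed, serialized and accounted in thread context on RT, while __do_softirq()'s open-coded dispatch loop is deleted and the ksoftirqd hotplug-thread setup is adjusted to match. Whatever the surrounding policy, the heart of any such dispatcher remains a walk over the pending bitmask against the softirq_vec[] table seen above:

#include <linux/bitops.h>
#include <linux/interrupt.h>

static void my_handle_pending(struct softirq_action *vec, unsigned long pending)
{
	while (pending) {
		int nr = __ffs(pending);	/* lowest pending softirq */

		vec[nr].action(&vec[nr]);	/* run exactly one handler */
		pending &= ~(1UL << nr);
	}
}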
-diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machine.c
---- linux-4.1.26.orig/kernel/stop_machine.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/stop_machine.c 2016-06-19 15:30:58.707298121 +0200
-@@ -35,7 +35,7 @@
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 695f0c6cd169..d3ea2452e291 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -35,7 +35,7 @@ struct cpu_stop_done {
/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
@@ -21807,7 +23014,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
bool enabled; /* is this stopper enabled? */
struct list_head works; /* list of pending works */
};
-@@ -78,7 +78,7 @@
+@@ -78,7 +78,7 @@ static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
unsigned long flags;
@@ -21816,7 +23023,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
if (stopper->enabled) {
list_add_tail(&work->list, &stopper->works);
-@@ -86,7 +86,7 @@
+@@ -86,7 +86,7 @@ static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
} else
cpu_stop_signal_done(work->done, false);
@@ -21825,7 +23032,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
}
/**
-@@ -248,7 +248,7 @@
+@@ -248,7 +248,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
struct irq_cpu_stop_queue_work_info call_args;
struct multi_stop_data msdata;
@@ -21834,7 +23041,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
msdata = (struct multi_stop_data){
.fn = fn,
.data = arg,
-@@ -281,7 +281,7 @@
+@@ -281,7 +281,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
* This relies on the stopper workqueues to be FIFO.
*/
if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
@@ -21843,7 +23050,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
return -ENOENT;
}
-@@ -295,7 +295,7 @@
+@@ -295,7 +295,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
&irq_cpu_stop_queue_work,
&call_args, 1);
lg_local_unlock(&stop_cpus_lock);
@@ -21852,7 +23059,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
wait_for_completion(&done.completion);
-@@ -329,7 +329,7 @@
+@@ -329,7 +329,7 @@ static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
static void queue_stop_cpus_work(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg,
@@ -21861,7 +23068,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
{
struct cpu_stop_work *work;
unsigned int cpu;
-@@ -343,11 +343,13 @@
+@@ -343,11 +343,13 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
}
/*
@@ -21879,7 +23086,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
for_each_cpu(cpu, cpumask)
cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
lg_global_unlock(&stop_cpus_lock);
-@@ -359,7 +361,7 @@
+@@ -359,7 +361,7 @@ static int __stop_cpus(const struct cpumask *cpumask,
struct cpu_stop_done done;
cpu_stop_init_done(&done, cpumask_weight(cpumask));
@@ -21888,7 +23095,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
wait_for_completion(&done.completion);
return done.executed ? done.ret : -ENOENT;
}
-@@ -439,9 +441,9 @@
+@@ -439,9 +441,9 @@ static int cpu_stop_should_run(unsigned int cpu)
unsigned long flags;
int run;
@@ -21900,7 +23107,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
return run;
}
-@@ -453,13 +455,13 @@
+@@ -453,13 +455,13 @@ static void cpu_stopper_thread(unsigned int cpu)
repeat:
work = NULL;
@@ -21916,7 +23123,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
if (work) {
cpu_stop_fn_t fn = work->fn;
-@@ -467,6 +469,16 @@
+@@ -467,6 +469,16 @@ repeat:
struct cpu_stop_done *done = work->done;
char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
@@ -21933,7 +23140,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
/* cpu stop callbacks are not allowed to sleep */
preempt_disable();
-@@ -500,20 +512,20 @@
+@@ -500,20 +512,20 @@ static void cpu_stop_park(unsigned int cpu)
unsigned long flags;
/* drain remaining works */
@@ -21958,7 +23165,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
}
static struct smp_hotplug_thread cpu_stop_threads = {
-@@ -535,10 +547,12 @@
+@@ -535,10 +547,12 @@ static int __init cpu_stop_init(void)
for_each_possible_cpu(cpu) {
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
@@ -21972,7 +23179,7 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
stop_machine_initialized = true;
return 0;
-@@ -634,7 +648,7 @@
+@@ -634,7 +648,7 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
set_state(&msdata, MULTI_STOP_PREPARE);
cpu_stop_init_done(&done, num_active_cpus());
queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
@@ -21981,9 +23188,10 @@ diff -Nur linux-4.1.26.orig/kernel/stop_machine.c linux-4.1.26/kernel/stop_machi
ret = multi_cpu_stop(&msdata);
/* Busy wait for completion. */
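stop_machine.c is almost entirely a lock-type conversion: the per-CPU stopper's spinlock_t becomes a raw_spinlock_t and every spin_lock_irqsave()/spin_unlock_irqrestore() pair follows, because cpu_stop_queue_work() runs with interrupts disabled and must never sleep — which a spinlock_t would do once RT turns it into an rtmutex. The pattern is mechanical:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_stopper {
	raw_spinlock_t lock;		/* was spinlock_t */
	struct list_head works;
};

static void my_queue_stop_work(struct my_stopper *s, struct list_head *work)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&s->lock, flags);	/* was spin_lock_irqsave */
	list_add_tail(work, &s->works);
	raw_spin_unlock_irqrestore(&s->lock, flags);
}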
-diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtimer.c
---- linux-4.1.26.orig/kernel/time/hrtimer.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/hrtimer.c 2016-06-19 15:30:58.707298121 +0200
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 93ef7190bdea..2c6be169bdc7 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
@@ -48,11 +48,13 @@
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
@@ -21998,7 +23206,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
#include "tick-internal.h"
-@@ -576,8 +578,7 @@
+@@ -576,8 +578,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
* the callback is executed in the hrtimer_interrupt context. The
@@ -22008,7 +23216,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
*/
if (hrtimer_callback_running(timer))
return 0;
-@@ -621,6 +622,9 @@
+@@ -621,6 +622,9 @@ static int hrtimer_reprogram(struct hrtimer *timer,
return res;
}
@@ -22018,7 +23226,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
/*
* Initialize the high resolution related parts of cpu_base
*/
-@@ -630,6 +634,21 @@
+@@ -630,6 +634,21 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
base->hres_active = 0;
}
@@ -22040,7 +23248,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
-@@ -695,6 +714,44 @@
+@@ -695,6 +714,44 @@ static void clock_was_set_work(struct work_struct *work)
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
@@ -22085,7 +23293,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
/*
* Called from timekeeping and resume code to reprogramm the hrtimer
* interrupt device on all cpus.
-@@ -703,6 +760,7 @@
+@@ -703,6 +760,7 @@ void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
@@ -22093,7 +23301,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
#else
-@@ -711,6 +769,13 @@
+@@ -711,6 +769,13 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
@@ -22107,7 +23315,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
static inline int hrtimer_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
-@@ -718,7 +783,6 @@
+@@ -718,7 +783,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
@@ -22115,7 +23323,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
#endif /* CONFIG_HIGH_RES_TIMERS */
/*
-@@ -836,6 +900,32 @@
+@@ -836,6 +900,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -22148,7 +23356,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -879,6 +969,11 @@
+@@ -879,6 +969,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
@@ -22160,7 +23368,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
next_timer = timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
if (&timer->node == next_timer) {
-@@ -966,7 +1061,16 @@
+@@ -966,7 +1061,16 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
timer_stats_hrtimer_set_start_info(timer);
@@ -22177,7 +23385,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
leftmost = enqueue_hrtimer(timer, new_base);
if (!leftmost) {
-@@ -980,15 +1084,26 @@
+@@ -980,15 +1084,26 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
* on dynticks target.
*/
wake_up_nohz_cpu(new_base->cpu_base->cpu);
@@ -22207,7 +23415,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
/*
* We need to drop cpu_base->lock to avoid a
* lock ordering issue vs. rq->lock.
-@@ -996,9 +1111,7 @@
+@@ -996,9 +1111,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
raw_spin_unlock(&new_base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
local_irq_restore(flags);
@@ -22218,7 +23426,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
}
}
-@@ -1089,7 +1202,7 @@
+@@ -1089,7 +1202,7 @@ int hrtimer_cancel(struct hrtimer *timer)
if (ret >= 0)
return ret;
@@ -22227,7 +23435,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1153,6 +1266,7 @@
+@@ -1153,6 +1266,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
@@ -22235,7 +23443,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
-@@ -1236,6 +1350,126 @@
+@@ -1236,6 +1350,126 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
@@ -22362,7 +23570,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
#ifdef CONFIG_HIGH_RES_TIMERS
/*
-@@ -1246,7 +1480,7 @@
+@@ -1246,7 +1480,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
@@ -22371,7 +23579,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
-@@ -1281,6 +1515,15 @@
+@@ -1281,6 +1515,15 @@ retry:
timer = container_of(node, struct hrtimer, node);
@@ -22387,7 +23595,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
/*
* The immediate goal for using the softexpires is
* minimizing wakeups, not running timers at the
-@@ -1296,7 +1539,10 @@
+@@ -1296,7 +1539,10 @@ retry:
if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
break;
@@ -22399,7 +23607,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
}
}
/* Reevaluate the clock bases for the next expiry */
-@@ -1313,7 +1559,7 @@
+@@ -1313,7 +1559,7 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
@@ -22408,7 +23616,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
}
/*
-@@ -1357,6 +1603,9 @@
+@@ -1357,6 +1603,9 @@ retry:
tick_program_event(expires_next, 1);
printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
ktime_to_ns(delta));
@@ -22418,7 +23626,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
}
/*
-@@ -1392,18 +1641,18 @@
+@@ -1392,18 +1641,18 @@ void hrtimer_peek_ahead_timers(void)
__hrtimer_peek_ahead_timers();
local_irq_restore(flags);
}
@@ -22443,7 +23651,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
/*
* Called from timer softirq every jiffy, expire hrtimers:
*
-@@ -1436,7 +1685,7 @@
+@@ -1436,7 +1685,7 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
struct hrtimer_clock_base *base;
@@ -22452,7 +23660,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
if (hrtimer_hres_active())
return;
-@@ -1461,10 +1710,16 @@
+@@ -1461,10 +1710,16 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
@@ -22470,7 +23678,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
}
/*
-@@ -1486,16 +1741,18 @@
+@@ -1486,16 +1741,18 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@@ -22491,7 +23699,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
hrtimer_start_expires(&t->timer, mode);
if (!hrtimer_active(&t->timer))
t->task = NULL;
-@@ -1539,7 +1796,8 @@
+@@ -1539,7 +1796,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
@@ -22501,7 +23709,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
goto out;
rmtp = restart->nanosleep.rmtp;
-@@ -1556,8 +1814,10 @@
+@@ -1556,8 +1814,10 @@ out:
return ret;
}
@@ -22514,7 +23722,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
{
struct restart_block *restart;
struct hrtimer_sleeper t;
-@@ -1570,7 +1830,7 @@
+@@ -1570,7 +1830,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
hrtimer_init_on_stack(&t.timer, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
@@ -22523,7 +23731,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
goto out;
/* Absolute timers do not update the rmtp value and restart: */
-@@ -1597,6 +1857,12 @@
+@@ -1597,6 +1857,12 @@ out:
return ret;
}
@@ -22536,7 +23744,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
-@@ -1611,6 +1877,26 @@
+@@ -1611,6 +1877,26 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
@@ -22563,7 +23771,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
/*
* Functions related to boot-time initialization:
*/
-@@ -1622,10 +1908,14 @@
+@@ -1622,10 +1908,14 @@ static void init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -22578,7 +23786,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
}
#ifdef CONFIG_HOTPLUG_CPU
-@@ -1731,9 +2021,7 @@
+@@ -1731,9 +2021,7 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
@@ -22588,10 +23796,11 @@ diff -Nur linux-4.1.26.orig/kernel/time/hrtimer.c linux-4.1.26/kernel/time/hrtim
}
/**
-diff -Nur linux-4.1.26.orig/kernel/time/itimer.c linux-4.1.26/kernel/time/itimer.c
---- linux-4.1.26.orig/kernel/time/itimer.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/itimer.c 2016-06-19 15:30:58.711298275 +0200
-@@ -213,6 +213,7 @@
+diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
+index 8d262b467573..d0513909d663 100644
+--- a/kernel/time/itimer.c
++++ b/kernel/time/itimer.c
+@@ -213,6 +213,7 @@ again:
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
@@ -22599,10 +23808,11 @@ diff -Nur linux-4.1.26.orig/kernel/time/itimer.c linux-4.1.26/kernel/time/itimer
goto again;
}
expires = timeval_to_ktime(value->it_value);
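hrtimer.c above is the core of the RT timer rework: a 120-line block after __run_hrtimer() reintroduces HRTIMER_SOFTIRQ handling (raise_softirq_irqoff(HRTIMER_SOFTIRQ) is visible in the start-path hunk), and hrtimer_interrupt() learns to divert callbacks that are not marked irqsafe out of hard interrupt context. The single line added to itimer.c then deals with the consequence: hrtimer_try_to_cancel() can keep failing while the callback runs in the softirq thread, so rather than livelocking on "goto again" the caller should block until the callback finishes. A plausible shape for such a wait helper — name and mechanism assumed; the real one presumably waits on a base waitqueue instead of polling:

#include <linux/hrtimer.h>
#include <linux/sched.h>

static void my_wait_for_hrtimer(struct hrtimer *timer)
{
	/* sleep (rather than spin) until the callback has completed */
	while (hrtimer_callback_running(timer))
		schedule_timeout_uninterruptible(1);
}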
-diff -Nur linux-4.1.26.orig/kernel/time/jiffies.c linux-4.1.26/kernel/time/jiffies.c
---- linux-4.1.26.orig/kernel/time/jiffies.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/jiffies.c 2016-06-19 15:30:58.711298275 +0200
-@@ -74,7 +74,8 @@
+diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
+index 347fecf86a3f..2ede47408a3e 100644
+--- a/kernel/time/jiffies.c
++++ b/kernel/time/jiffies.c
+@@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = {
.max_cycles = 10,
};
@@ -22612,7 +23822,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/jiffies.c linux-4.1.26/kernel/time/jiffi
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
-@@ -83,9 +84,9 @@
+@@ -83,9 +84,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
@@ -22624,9 +23834,10 @@ diff -Nur linux-4.1.26.orig/kernel/time/jiffies.c linux-4.1.26/kernel/time/jiffi
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
-diff -Nur linux-4.1.26.orig/kernel/time/ntp.c linux-4.1.26/kernel/time/ntp.c
---- linux-4.1.26.orig/kernel/time/ntp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/ntp.c 2016-06-19 15:30:58.711298275 +0200
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 7a681003001c..bd9c53985d32 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
@@ -22635,7 +23846,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/ntp.c linux-4.1.26/kernel/time/ntp.c
#include <linux/math64.h>
#include <linux/timex.h>
#include <linux/time.h>
-@@ -529,10 +530,52 @@
+@@ -529,10 +530,52 @@ static void sync_cmos_clock(struct work_struct *work)
&sync_cmos_work, timespec_to_jiffies(&next));
}
@@ -22688,9 +23899,10 @@ diff -Nur linux-4.1.26.orig/kernel/time/ntp.c linux-4.1.26/kernel/time/ntp.c
#else
void ntp_notify_cmos_timer(void) { }
-diff -Nur linux-4.1.26.orig/kernel/time/posix-cpu-timers.c linux-4.1.26/kernel/time/posix-cpu-timers.c
---- linux-4.1.26.orig/kernel/time/posix-cpu-timers.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/posix-cpu-timers.c 2016-06-19 15:30:58.711298275 +0200
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 57d1acb91c56..5b24aefef595 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
@@ -3,6 +3,7 @@
*/
@@ -22699,7 +23911,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-cpu-timers.c linux-4.1.26/kernel/t
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
-@@ -626,7 +627,7 @@
+@@ -626,7 +627,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
/*
* Disarm any old timer after extracting its expiry time.
*/
@@ -22708,7 +23920,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-cpu-timers.c linux-4.1.26/kernel/t
ret = 0;
old_incr = timer->it.cpu.incr;
-@@ -1047,7 +1048,7 @@
+@@ -1048,7 +1049,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
/*
* Now re-arm for the new expiry time.
*/
@@ -22717,7 +23929,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-cpu-timers.c linux-4.1.26/kernel/t
arm_timer(timer);
unlock_task_sighand(p, &flags);
-@@ -1113,10 +1114,11 @@
+@@ -1114,10 +1115,11 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
sig = tsk->signal;
if (sig->cputimer.running) {
struct task_cputime group_sample;
@@ -22731,7 +23943,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-cpu-timers.c linux-4.1.26/kernel/t
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
-@@ -1130,13 +1132,13 @@
+@@ -1131,13 +1133,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
@@ -22747,7 +23959,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-cpu-timers.c linux-4.1.26/kernel/t
/*
* The fast path checks that there are no expired thread or thread
-@@ -1194,6 +1196,190 @@
+@@ -1195,6 +1197,190 @@ void run_posix_cpu_timers(struct task_struct *tsk)
}
}
@@ -22938,10 +24150,11 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-cpu-timers.c linux-4.1.26/kernel/t
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
-diff -Nur linux-4.1.26.orig/kernel/time/posix-timers.c linux-4.1.26/kernel/time/posix-timers.c
---- linux-4.1.26.orig/kernel/time/posix-timers.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/posix-timers.c 2016-06-19 15:30:58.711298275 +0200
-@@ -499,6 +499,7 @@
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 31ea01f42e1f..0f5d7eae61f0 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -499,6 +499,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
static struct pid *good_sigevent(sigevent_t * event)
{
struct task_struct *rtn = current->group_leader;
@@ -22949,7 +24162,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-timers.c linux-4.1.26/kernel/time/
if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-@@ -507,7 +508,8 @@
+@@ -507,7 +508,8 @@ static struct pid *good_sigevent(sigevent_t * event)
return NULL;
if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
@@ -22959,7 +24172,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-timers.c linux-4.1.26/kernel/time/
return NULL;
return task_pid(rtn);
-@@ -819,6 +821,20 @@
+@@ -819,6 +821,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
return overrun;
}
@@ -22980,7 +24193,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-timers.c linux-4.1.26/kernel/time/
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
-@@ -896,6 +912,7 @@
+@@ -896,6 +912,7 @@ retry:
if (!timr)
return -EINVAL;
@@ -22988,7 +24201,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-timers.c linux-4.1.26/kernel/time/
kc = clockid_to_kclock(timr->it_clock);
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
-@@ -904,9 +921,12 @@
+@@ -904,9 +921,12 @@ retry:
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
@@ -23001,7 +24214,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-timers.c linux-4.1.26/kernel/time/
if (old_setting && !error &&
copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-@@ -944,10 +964,15 @@
+@@ -944,10 +964,15 @@ retry_delete:
if (!timer)
return -EINVAL;
@@ -23017,7 +24230,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-timers.c linux-4.1.26/kernel/time/
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
-@@ -973,8 +998,18 @@
+@@ -973,8 +998,18 @@ static void itimer_delete(struct k_itimer *timer)
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
@@ -23036,20 +24249,22 @@ diff -Nur linux-4.1.26.orig/kernel/time/posix-timers.c linux-4.1.26/kernel/time/
goto retry_delete;
}
list_del(&timer->list);
-diff -Nur linux-4.1.26.orig/kernel/time/tick-broadcast-hrtimer.c linux-4.1.26/kernel/time/tick-broadcast-hrtimer.c
---- linux-4.1.26.orig/kernel/time/tick-broadcast-hrtimer.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/tick-broadcast-hrtimer.c 2016-06-19 15:30:58.711298275 +0200
-@@ -109,5 +109,6 @@
+diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
+index 6aac4beedbbe..943c03395e46 100644
+--- a/kernel/time/tick-broadcast-hrtimer.c
++++ b/kernel/time/tick-broadcast-hrtimer.c
+@@ -109,5 +109,6 @@ void tick_setup_hrtimer_broadcast(void)
{
hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
bctimer.function = bc_handler;
+ bctimer.irqsafe = true;
clockevents_register_device(&ce_broadcast_hrtimer);
}
-diff -Nur linux-4.1.26.orig/kernel/time/tick-common.c linux-4.1.26/kernel/time/tick-common.c
---- linux-4.1.26.orig/kernel/time/tick-common.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/tick-common.c 2016-06-19 15:30:58.711298275 +0200
-@@ -78,13 +78,15 @@
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index 3ae6afa1eb98..14a10917c8a3 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -78,13 +78,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
@@ -23067,7 +24282,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/tick-common.c linux-4.1.26/kernel/time/t
update_wall_time();
}
-@@ -146,9 +148,9 @@
+@@ -146,9 +148,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
@@ -23079,10 +24294,11 @@ diff -Nur linux-4.1.26.orig/kernel/time/tick-common.c linux-4.1.26/kernel/time/t
clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
-diff -Nur linux-4.1.26.orig/kernel/time/tick-sched.c linux-4.1.26/kernel/time/tick-sched.c
---- linux-4.1.26.orig/kernel/time/tick-sched.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/tick-sched.c 2016-06-19 15:30:58.711298275 +0200
-@@ -62,7 +62,8 @@
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 914259128145..b3841ba00c69 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(ktime_t now)
return;
/* Reevalute with jiffies_lock held */
@@ -23092,7 +24308,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/tick-sched.c linux-4.1.26/kernel/time/ti
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
-@@ -85,10 +86,12 @@
+@@ -85,10 +86,12 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
} else {
@@ -23107,7 +24323,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/tick-sched.c linux-4.1.26/kernel/time/ti
update_wall_time();
}
-@@ -99,12 +102,14 @@
+@@ -99,12 +102,14 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
@@ -23124,7 +24340,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/tick-sched.c linux-4.1.26/kernel/time/ti
return period;
}
-@@ -176,6 +181,11 @@
+@@ -176,6 +181,11 @@ static bool can_stop_full_tick(void)
return false;
}
@@ -23136,7 +24352,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/tick-sched.c linux-4.1.26/kernel/time/ti
/* sched_clock_tick() needs us? */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
-@@ -222,6 +232,7 @@
+@@ -222,6 +232,7 @@ static void nohz_full_kick_work_func(struct irq_work *work)
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_work_func,
@@ -23144,7 +24360,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/tick-sched.c linux-4.1.26/kernel/time/ti
};
/*
-@@ -578,10 +589,10 @@
+@@ -578,10 +589,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
/* Read jiffies and the time when jiffies were updated last */
do {
@@ -23157,7 +24373,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/tick-sched.c linux-4.1.26/kernel/time/ti
if (rcu_needs_cpu(&rcu_delta_jiffies) ||
arch_needs_cpu() || irq_work_needs_cpu()) {
-@@ -759,14 +770,7 @@
+@@ -759,14 +770,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
@@ -23173,7 +24389,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/tick-sched.c linux-4.1.26/kernel/time/ti
return false;
}
-@@ -1154,6 +1158,7 @@
+@@ -1154,6 +1158,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -23181,10 +24397,11 @@ diff -Nur linux-4.1.26.orig/kernel/time/tick-sched.c linux-4.1.26/kernel/time/ti
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per cpu) */
-diff -Nur linux-4.1.26.orig/kernel/time/timekeeping.c linux-4.1.26/kernel/time/timekeeping.c
---- linux-4.1.26.orig/kernel/time/timekeeping.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/timekeeping.c 2016-06-19 15:30:58.711298275 +0200
-@@ -2064,8 +2064,10 @@
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index d9f112bd42a7..7713b181ccfa 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -2087,8 +2087,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
@@ -23197,10 +24414,11 @@ diff -Nur linux-4.1.26.orig/kernel/time/timekeeping.c linux-4.1.26/kernel/time/t
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
-diff -Nur linux-4.1.26.orig/kernel/time/timekeeping.h linux-4.1.26/kernel/time/timekeeping.h
---- linux-4.1.26.orig/kernel/time/timekeeping.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/timekeeping.h 2016-06-19 15:30:58.711298275 +0200
-@@ -22,7 +22,8 @@
+diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
+index ead8794b9a4e..d7a9120a9f52 100644
+--- a/kernel/time/timekeeping.h
++++ b/kernel/time/timekeeping.h
+@@ -22,7 +22,8 @@ extern void timekeeping_resume(void);
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
@@ -23210,10 +24428,11 @@ diff -Nur linux-4.1.26.orig/kernel/time/timekeeping.h linux-4.1.26/kernel/time/t
#define CS_NAME_LEN 32
-diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
---- linux-4.1.26.orig/kernel/time/timer.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/time/timer.c 2016-06-19 15:30:58.711298275 +0200
-@@ -78,6 +78,9 @@
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 2ece3aa5069c..b1f9e6c5bec4 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -78,6 +78,9 @@ struct tvec_root {
struct tvec_base {
spinlock_t lock;
struct timer_list *running_timer;
@@ -23223,7 +24442,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
unsigned long timer_jiffies;
unsigned long next_timer;
unsigned long active_timers;
-@@ -768,6 +771,36 @@
+@@ -768,6 +771,36 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
}
}
@@ -23260,7 +24479,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
bool pending_only, int pinned)
-@@ -798,14 +831,8 @@
+@@ -798,14 +831,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
* handler yet has not finished. This also guarantees that
* the timer is serialized wrt itself.
*/
@@ -23277,7 +24496,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
}
timer->expires = expires;
-@@ -979,6 +1006,29 @@
+@@ -979,6 +1006,29 @@ void add_timer_on(struct timer_list *timer, int cpu)
}
EXPORT_SYMBOL_GPL(add_timer_on);
@@ -23294,7 +24513,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
+ base->running_timer != timer);
+}
+
-+# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
++# define wakeup_timer_waiters(b) wake_up_all(&(b)->wait_for_running_timer)
+#else
+static inline void wait_for_running_timer(struct timer_list *timer)
+{
@@ -23307,7 +24526,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
/**
* del_timer - deactive a timer.
* @timer: the timer to be deactivated
-@@ -1036,7 +1086,7 @@
+@@ -1036,7 +1086,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
}
EXPORT_SYMBOL(try_to_del_timer_sync);
@@ -23316,7 +24535,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
/**
-@@ -1098,7 +1148,7 @@
+@@ -1098,7 +1148,7 @@ int del_timer_sync(struct timer_list *timer)
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
@@ -23325,7 +24544,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
}
}
EXPORT_SYMBOL(del_timer_sync);
-@@ -1219,15 +1269,17 @@
+@@ -1219,16 +1269,18 @@ static inline void __run_timers(struct tvec_base *base)
if (irqsafe) {
spin_unlock(&base->lock);
call_timer_fn(timer, fn, data);
@@ -23340,11 +24559,12 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
}
}
- base->running_timer = NULL;
-+ wakeup_timer_waiters(base);
spin_unlock_irq(&base->lock);
++ wakeup_timer_waiters(base);
}
-@@ -1367,6 +1419,14 @@
+ #ifdef CONFIG_NO_HZ_COMMON
+@@ -1367,6 +1419,14 @@ unsigned long get_next_timer_interrupt(unsigned long now)
if (cpu_is_offline(smp_processor_id()))
return expires;
@@ -23359,7 +24579,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
spin_lock(&base->lock);
if (base->active_timers) {
if (time_before_eq(base->next_timer, base->timer_jiffies))
-@@ -1392,13 +1452,13 @@
+@@ -1392,13 +1452,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
@@ -23375,7 +24595,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
run_posix_cpu_timers(p);
}
-@@ -1411,6 +1471,8 @@
+@@ -1411,6 +1471,8 @@ static void run_timer_softirq(struct softirq_action *h)
hrtimer_run_pending();
@@ -23384,7 +24604,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
if (time_after_eq(jiffies, base->timer_jiffies))
__run_timers(base);
}
-@@ -1566,7 +1628,7 @@
+@@ -1566,7 +1628,7 @@ static void migrate_timers(int cpu)
BUG_ON(cpu_online(cpu));
old_base = per_cpu(tvec_bases, cpu);
@@ -23393,7 +24613,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
-@@ -1590,7 +1652,7 @@
+@@ -1590,7 +1652,7 @@ static void migrate_timers(int cpu)
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
@@ -23402,7 +24622,7 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
}
static int timer_cpu_notify(struct notifier_block *self,
-@@ -1625,6 +1687,9 @@
+@@ -1625,6 +1687,9 @@ static void __init init_timer_cpu(struct tvec_base *base, int cpu)
base->cpu = cpu;
per_cpu(tvec_bases, cpu) = base;
spin_lock_init(&base->lock);
@@ -23412,10 +24632,11 @@ diff -Nur linux-4.1.26.orig/kernel/time/timer.c linux-4.1.26/kernel/time/timer.c
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
-diff -Nur linux-4.1.26.orig/kernel/trace/Kconfig linux-4.1.26/kernel/trace/Kconfig
---- linux-4.1.26.orig/kernel/trace/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/trace/Kconfig 2016-06-19 15:30:58.711298275 +0200
-@@ -187,6 +187,24 @@
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index 3b9a48ae153a..ab3a277a3c20 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -187,6 +187,24 @@ config IRQSOFF_TRACER
enabled. This option and the preempt-off timing option can be
used together or separately.)
@@ -23440,7 +24661,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/Kconfig linux-4.1.26/kernel/trace/Kconf
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
default n
-@@ -211,6 +229,24 @@
+@@ -211,6 +229,24 @@ config PREEMPT_TRACER
enabled. This option and the irqs-off timing option can be
used together or separately.)
@@ -23465,7 +24686,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/Kconfig linux-4.1.26/kernel/trace/Kconf
config SCHED_TRACER
bool "Scheduling Latency Tracer"
select GENERIC_TRACER
-@@ -221,6 +257,74 @@
+@@ -221,6 +257,74 @@ config SCHED_TRACER
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.
@@ -23540,9 +24761,26 @@ diff -Nur linux-4.1.26.orig/kernel/trace/Kconfig linux-4.1.26/kernel/trace/Kconf
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
-diff -Nur linux-4.1.26.orig/kernel/trace/latency_hist.c linux-4.1.26/kernel/trace/latency_hist.c
---- linux-4.1.26.orig/kernel/trace/latency_hist.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.26/kernel/trace/latency_hist.c 2016-06-19 15:30:58.715298429 +0200
+diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
+index 9b1044e936a6..3bbaea06824a 100644
+--- a/kernel/trace/Makefile
++++ b/kernel/trace/Makefile
+@@ -36,6 +36,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
+ obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
+ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
++obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
++obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
++obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
++obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
+ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
+ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
+ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
+diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
+new file mode 100644
+index 000000000000..b6c1d14b71c4
+--- /dev/null
++++ b/kernel/trace/latency_hist.c
@@ -0,0 +1,1178 @@
+/*
+ * kernel/trace/latency_hist.c
@@ -24722,24 +25960,11 @@ diff -Nur linux-4.1.26.orig/kernel/trace/latency_hist.c linux-4.1.26/kernel/trac
+}
+
+device_initcall(latency_hist_init);
-diff -Nur linux-4.1.26.orig/kernel/trace/Makefile linux-4.1.26/kernel/trace/Makefile
---- linux-4.1.26.orig/kernel/trace/Makefile 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/trace/Makefile 2016-06-19 15:30:58.715298429 +0200
-@@ -36,6 +36,10 @@
- obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
- obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
- obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
-+obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
-+obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
-+obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
-+obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
- obj-$(CONFIG_NOP_TRACER) += trace_nop.o
- obj-$(CONFIG_STACK_TRACER) += trace_stack.o
- obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
-diff -Nur linux-4.1.26.orig/kernel/trace/trace.c linux-4.1.26/kernel/trace/trace.c
---- linux-4.1.26.orig/kernel/trace/trace.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/trace/trace.c 2016-06-19 15:30:58.715298429 +0200
-@@ -1630,6 +1630,7 @@
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 61ea7e8cdde5..af8b5e5469bf 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1630,6 +1630,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -24747,7 +25972,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace.c linux-4.1.26/kernel/trace/trace
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1639,8 +1640,11 @@
+@@ -1639,8 +1640,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -24760,7 +25985,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace.c linux-4.1.26/kernel/trace/trace
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -2558,14 +2562,17 @@
+@@ -2558,14 +2562,17 @@ get_total_entries(struct trace_buffer *buf,
static void print_lat_help_header(struct seq_file *m)
{
@@ -24786,7 +26011,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace.c linux-4.1.26/kernel/trace/trace
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2591,11 +2598,14 @@
+@@ -2591,11 +2598,14 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
print_event_info(buf, m);
seq_puts(m, "# _-----=> irqs-off\n"
"# / _----=> need-resched\n"
@@ -24806,22 +26031,11 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace.c linux-4.1.26/kernel/trace/trace
}
void
-diff -Nur linux-4.1.26.orig/kernel/trace/trace_events.c linux-4.1.26/kernel/trace/trace_events.c
---- linux-4.1.26.orig/kernel/trace/trace_events.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/trace/trace_events.c 2016-06-19 15:30:58.715298429 +0200
-@@ -162,6 +162,8 @@
- __common_field(unsigned char, flags);
- __common_field(unsigned char, preempt_count);
- __common_field(int, pid);
-+ __common_field(unsigned short, migrate_disable);
-+ __common_field(unsigned short, padding);
-
- return ret;
- }
-diff -Nur linux-4.1.26.orig/kernel/trace/trace.h linux-4.1.26/kernel/trace/trace.h
---- linux-4.1.26.orig/kernel/trace/trace.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/trace/trace.h 2016-06-19 15:30:58.715298429 +0200
-@@ -120,6 +120,7 @@
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 921691c5cb04..c0f3c568cac8 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -120,6 +120,7 @@ struct kretprobe_trace_entry_head {
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
@@ -24829,7 +26043,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace.h linux-4.1.26/kernel/trace/trace
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
-@@ -128,6 +129,7 @@
+@@ -128,6 +129,7 @@ enum trace_flag_type {
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
@@ -24837,9 +26051,38 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace.h linux-4.1.26/kernel/trace/trace
};
#define TRACE_BUF_SIZE 1024
-diff -Nur linux-4.1.26.orig/kernel/trace/trace_irqsoff.c linux-4.1.26/kernel/trace/trace_irqsoff.c
---- linux-4.1.26.orig/kernel/trace/trace_irqsoff.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/trace/trace_irqsoff.c 2016-06-19 15:30:58.715298429 +0200
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 6459f77e2c72..b83d6a4d3912 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -162,6 +162,8 @@ static int trace_define_common_fields(void)
+ __common_field(unsigned char, flags);
+ __common_field(unsigned char, preempt_count);
+ __common_field(int, pid);
++ __common_field(unsigned short, migrate_disable);
++ __common_field(unsigned short, padding);
+
+ return ret;
+ }
+@@ -198,6 +200,14 @@ void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
+
+ local_save_flags(fbuffer->flags);
+ fbuffer->pc = preempt_count();
++ /*
++ * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
++ * preemption (adding one to the preempt_count). Since we are
++ * interested in the preempt_count at the time the tracepoint was
++ * hit, we need to subtract one to offset the increment.
++ */
++ if (IS_ENABLED(CONFIG_PREEMPT))
++ fbuffer->pc--;
+ fbuffer->ftrace_file = ftrace_file;
+
+ fbuffer->event =
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 8523ea345f2b..0f2d3e3545e8 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
@@ -13,6 +13,7 @@
#include <linux/uaccess.h>
#include <linux/module.h>
@@ -24848,7 +26091,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_irqsoff.c linux-4.1.26/kernel/tra
#include "trace.h"
-@@ -433,11 +434,13 @@
+@@ -433,11 +434,13 @@ void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -24862,7 +26105,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_irqsoff.c linux-4.1.26/kernel/tra
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-@@ -447,6 +450,7 @@
+@@ -447,6 +450,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
@@ -24870,7 +26113,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_irqsoff.c linux-4.1.26/kernel/tra
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
-@@ -455,6 +459,7 @@
+@@ -455,6 +459,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
@@ -24878,7 +26121,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_irqsoff.c linux-4.1.26/kernel/tra
}
#else /* !CONFIG_PROVE_LOCKING */
-@@ -480,6 +485,7 @@
+@@ -480,6 +485,7 @@ inline void print_irqtrace_events(struct task_struct *curr)
*/
void trace_hardirqs_on(void)
{
@@ -24886,7 +26129,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_irqsoff.c linux-4.1.26/kernel/tra
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-@@ -489,11 +495,13 @@
+@@ -489,11 +495,13 @@ void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -24900,7 +26143,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_irqsoff.c linux-4.1.26/kernel/tra
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
-@@ -503,6 +511,7 @@
+@@ -503,6 +511,7 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
@@ -24908,7 +26151,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_irqsoff.c linux-4.1.26/kernel/tra
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
-@@ -512,12 +521,14 @@
+@@ -512,12 +521,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
@@ -24923,10 +26166,11 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_irqsoff.c linux-4.1.26/kernel/tra
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
}
-diff -Nur linux-4.1.26.orig/kernel/trace/trace_output.c linux-4.1.26/kernel/trace/trace_output.c
---- linux-4.1.26.orig/kernel/trace/trace_output.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/trace/trace_output.c 2016-06-19 15:30:58.715298429 +0200
-@@ -430,6 +430,7 @@
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 25a086bcb700..c86bed27213f 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -430,6 +430,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
char hardsoft_irq;
char need_resched;
@@ -24934,7 +26178,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_output.c linux-4.1.26/kernel/trac
char irqs_off;
int hardirq;
int softirq;
-@@ -457,6 +458,8 @@
+@@ -457,6 +458,8 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
need_resched = '.';
break;
}
@@ -24943,7 +26187,7 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_output.c linux-4.1.26/kernel/trac
hardsoft_irq =
(hardirq && softirq) ? 'H' :
-@@ -464,14 +467,25 @@
+@@ -464,14 +467,25 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
softirq ? 's' :
'.';
@@ -24971,10 +26215,11 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_output.c linux-4.1.26/kernel/trac
return !trace_seq_has_overflowed(s);
}
-diff -Nur linux-4.1.26.orig/kernel/trace/trace_sched_switch.c linux-4.1.26/kernel/trace/trace_sched_switch.c
---- linux-4.1.26.orig/kernel/trace/trace_sched_switch.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/trace/trace_sched_switch.c 2016-06-19 15:30:58.715298429 +0200
-@@ -26,7 +26,7 @@
+diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
+index 419ca37e72c9..f270088e9929 100644
+--- a/kernel/trace/trace_sched_switch.c
++++ b/kernel/trace/trace_sched_switch.c
+@@ -26,7 +26,7 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
}
static void
@@ -24983,10 +26228,11 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_sched_switch.c linux-4.1.26/kerne
{
if (unlikely(!sched_ref))
return;
-diff -Nur linux-4.1.26.orig/kernel/trace/trace_sched_wakeup.c linux-4.1.26/kernel/trace/trace_sched_wakeup.c
---- linux-4.1.26.orig/kernel/trace/trace_sched_wakeup.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/trace/trace_sched_wakeup.c 2016-06-19 15:30:58.719298583 +0200
-@@ -514,7 +514,7 @@
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index d6e1003724e9..79a2a5f7fc82 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -514,7 +514,7 @@ static void wakeup_reset(struct trace_array *tr)
}
static void
@@ -24995,10 +26241,11 @@ diff -Nur linux-4.1.26.orig/kernel/trace/trace_sched_wakeup.c linux-4.1.26/kerne
{
struct trace_array_cpu *data;
int cpu = smp_processor_id();
-diff -Nur linux-4.1.26.orig/kernel/user.c linux-4.1.26/kernel/user.c
---- linux-4.1.26.orig/kernel/user.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/user.c 2016-06-19 15:30:58.719298583 +0200
-@@ -161,11 +161,11 @@
+diff --git a/kernel/user.c b/kernel/user.c
+index b069ccbfb0b0..1a2e88e98b5e 100644
+--- a/kernel/user.c
++++ b/kernel/user.c
+@@ -161,11 +161,11 @@ void free_uid(struct user_struct *up)
if (!up)
return;
@@ -25012,10 +26259,11 @@ diff -Nur linux-4.1.26.orig/kernel/user.c linux-4.1.26/kernel/user.c
}
struct user_struct *alloc_uid(kuid_t uid)
-diff -Nur linux-4.1.26.orig/kernel/watchdog.c linux-4.1.26/kernel/watchdog.c
---- linux-4.1.26.orig/kernel/watchdog.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/watchdog.c 2016-06-19 15:30:58.719298583 +0200
-@@ -262,6 +262,8 @@
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index f89ea713213f..37dd3a5bf53f 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -262,6 +262,8 @@ static int is_softlockup(unsigned long touch_ts)
#ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -25024,7 +26272,7 @@ diff -Nur linux-4.1.26.orig/kernel/watchdog.c linux-4.1.26/kernel/watchdog.c
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
-@@ -295,13 +297,21 @@
+@@ -295,13 +297,21 @@ static void watchdog_overflow_callback(struct perf_event *event,
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
@@ -25048,7 +26296,7 @@ diff -Nur linux-4.1.26.orig/kernel/watchdog.c linux-4.1.26/kernel/watchdog.c
__this_cpu_write(hard_watchdog_warn, true);
return;
-@@ -444,6 +454,7 @@
+@@ -444,6 +454,7 @@ static void watchdog_enable(unsigned int cpu)
/* kick off the timer for the hardlockup detector */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
@@ -25056,9 +26304,10 @@ diff -Nur linux-4.1.26.orig/kernel/watchdog.c linux-4.1.26/kernel/watchdog.c
/* Enable the perf event */
watchdog_nmi_enable(cpu);
-diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
---- linux-4.1.26.orig/kernel/workqueue.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/workqueue.c 2016-06-19 15:30:58.719298583 +0200
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index d0efe9295a0e..80f3bb082136 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
@@ -48,6 +48,8 @@
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
@@ -25068,7 +26317,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
#include "workqueue_internal.h"
-@@ -121,11 +123,16 @@
+@@ -121,11 +123,16 @@ enum {
* cpu or grabbing pool->lock is enough for read access. If
* POOL_DISASSOCIATED is set, it's identical to L.
*
@@ -25086,7 +26335,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
*
* PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
*
-@@ -134,7 +141,7 @@
+@@ -134,7 +141,7 @@ enum {
*
* WQ: wq->mutex protected.
*
@@ -25095,7 +26344,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
*
* MD: wq_mayday_lock protected.
*/
-@@ -183,7 +190,7 @@
+@@ -183,7 +190,7 @@ struct worker_pool {
atomic_t nr_running ____cacheline_aligned_in_smp;
/*
@@ -25104,7 +26353,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
* from get_work_pool().
*/
struct rcu_head rcu;
-@@ -212,7 +219,7 @@
+@@ -212,7 +219,7 @@ struct pool_workqueue {
/*
* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
@@ -25113,7 +26362,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
-@@ -334,6 +341,8 @@
+@@ -334,6 +341,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
@@ -25122,7 +26371,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
static int worker_thread(void *__worker);
static void copy_workqueue_attrs(struct workqueue_attrs *to,
const struct workqueue_attrs *from);
-@@ -343,14 +352,14 @@
+@@ -343,14 +352,14 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
@@ -25141,7 +26390,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-@@ -368,7 +377,7 @@
+@@ -368,7 +377,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pool: iteration cursor
* @pi: integer used for iteration
*
@@ -25150,7 +26399,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
-@@ -400,7 +409,7 @@
+@@ -400,7 +409,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pwq: iteration cursor
* @wq: the target workqueue
*
@@ -25159,7 +26408,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
-@@ -412,6 +421,31 @@
+@@ -412,6 +421,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else
@@ -25191,7 +26440,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
-@@ -562,8 +596,7 @@
+@@ -562,8 +596,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
* @wq: the target workqueue
* @node: the node ID
*
@@ -25201,7 +26450,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
-@@ -706,8 +739,8 @@
+@@ -706,8 +739,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -25212,7 +26461,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
-@@ -844,51 +877,44 @@
+@@ -844,51 +877,44 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
*/
static void wake_up_worker(struct worker_pool *pool)
{
@@ -25283,7 +26532,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
struct worker_pool *pool;
/*
-@@ -897,29 +923,26 @@
+@@ -897,29 +923,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
@@ -25323,7 +26572,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
}
/**
-@@ -1113,12 +1136,12 @@
+@@ -1113,12 +1136,12 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
if (pwq) {
/*
@@ -25339,7 +26588,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
}
}
-@@ -1220,7 +1243,7 @@
+@@ -1220,7 +1243,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
struct worker_pool *pool;
struct pool_workqueue *pwq;
@@ -25348,7 +26597,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
/* try to steal the timer if it exists */
if (is_dwork) {
-@@ -1239,6 +1262,7 @@
+@@ -1239,6 +1262,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
@@ -25356,7 +26605,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1277,14 +1301,16 @@
+@@ -1277,14 +1301,16 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
@@ -25375,7 +26624,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
return -EAGAIN;
}
-@@ -1353,7 +1379,7 @@
+@@ -1353,7 +1379,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
@@ -25384,7 +26633,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
debug_work_activate(work);
-@@ -1361,6 +1387,8 @@
+@@ -1361,6 +1387,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -25393,7 +26642,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = raw_smp_processor_id();
-@@ -1417,10 +1445,8 @@
+@@ -1417,10 +1445,8 @@ retry:
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
@@ -25406,7 +26655,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
-@@ -1436,7 +1462,9 @@
+@@ -1436,7 +1462,9 @@ retry:
insert_work(pwq, work, worklist, work_flags);
@@ -25416,7 +26665,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
}
/**
-@@ -1456,14 +1484,14 @@
+@@ -1456,14 +1484,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
bool ret = false;
unsigned long flags;
@@ -25433,7 +26682,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
return ret;
}
EXPORT_SYMBOL(queue_work_on);
-@@ -1530,14 +1558,14 @@
+@@ -1530,14 +1558,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
unsigned long flags;
/* read the comment in __queue_work() */
@@ -25450,7 +26699,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1572,7 +1600,7 @@
+@@ -1572,7 +1600,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
@@ -25459,7 +26708,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
}
/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -1605,7 +1633,9 @@
+@@ -1605,7 +1633,9 @@ static void worker_enter_idle(struct worker *worker)
worker->last_active = jiffies;
/* idle_list is LIFO */
@@ -25469,7 +26718,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1638,7 +1668,9 @@
+@@ -1638,7 +1668,9 @@ static void worker_leave_idle(struct worker *worker)
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
@@ -25479,7 +26728,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
}
static struct worker *alloc_worker(int node)
-@@ -1806,7 +1838,9 @@
+@@ -1806,7 +1838,9 @@ static void destroy_worker(struct worker *worker)
pool->nr_workers--;
pool->nr_idle--;
@@ -25489,7 +26738,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
worker->flags |= WORKER_DIE;
wake_up_process(worker->task);
}
-@@ -2723,14 +2757,14 @@
+@@ -2723,14 +2757,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
might_sleep();
@@ -25507,7 +26756,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2757,10 +2791,11 @@
+@@ -2757,10 +2791,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
else
lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
@@ -25520,7 +26769,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
return false;
}
-@@ -2847,7 +2882,7 @@
+@@ -2847,7 +2882,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
@@ -25529,7 +26778,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
flush_work(work);
clear_work_data(work);
-@@ -2902,10 +2937,10 @@
+@@ -2902,10 +2937,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -25542,7 +26791,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -2940,7 +2975,7 @@
+@@ -2940,7 +2975,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
set_work_pool_and_clear_pending(&dwork->work,
get_work_pool_id(&dwork->work));
@@ -25551,7 +26800,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
return ret;
}
EXPORT_SYMBOL(cancel_delayed_work);
-@@ -3198,7 +3233,7 @@
+@@ -3198,7 +3233,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -25560,7 +26809,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -3252,8 +3287,8 @@
+@@ -3252,8 +3287,8 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
@@ -25571,7 +26820,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
}
/**
-@@ -3358,14 +3393,14 @@
+@@ -3358,14 +3393,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
@@ -25588,7 +26837,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
}
/**
-@@ -4003,7 +4038,7 @@
+@@ -4003,7 +4038,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
@@ -25597,7 +26846,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
} else {
/*
* We're the sole accessor of @wq at this point. Directly
-@@ -4096,7 +4131,8 @@
+@@ -4096,7 +4131,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
struct pool_workqueue *pwq;
bool ret;
@@ -25607,7 +26856,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4107,7 +4143,8 @@
+@@ -4107,7 +4143,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -25617,7 +26866,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
return ret;
}
-@@ -4133,15 +4170,15 @@
+@@ -4133,15 +4170,15 @@ unsigned int work_busy(struct work_struct *work)
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -25637,7 +26886,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
return ret;
}
-@@ -4330,7 +4367,7 @@
+@@ -4330,7 +4367,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
@@ -25646,7 +26895,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
pr_info("Showing busy workqueues and worker pools:\n");
-@@ -4381,7 +4418,7 @@
+@@ -4381,7 +4418,7 @@ void show_workqueue_state(void)
spin_unlock_irqrestore(&pool->lock, flags);
}
@@ -25655,7 +26904,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
}
/*
-@@ -4742,16 +4779,16 @@
+@@ -4742,16 +4779,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -25675,7 +26924,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-@@ -4865,7 +4902,8 @@
+@@ -4865,7 +4902,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
const char *delim = "";
int node, written = 0;
@@ -25685,7 +26934,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -4873,7 +4911,8 @@
+@@ -4873,7 +4911,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
@@ -25695,10 +26944,11 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue.c linux-4.1.26/kernel/workqueue.c
return written;
}
-diff -Nur linux-4.1.26.orig/kernel/workqueue_internal.h linux-4.1.26/kernel/workqueue_internal.h
---- linux-4.1.26.orig/kernel/workqueue_internal.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/kernel/workqueue_internal.h 2016-06-19 15:30:58.719298583 +0200
-@@ -43,6 +43,7 @@
+diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
+index 45215870ac6c..f000c4d6917e 100644
+--- a/kernel/workqueue_internal.h
++++ b/kernel/workqueue_internal.h
+@@ -43,6 +43,7 @@ struct worker {
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
@@ -25706,7 +26956,7 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue_internal.h linux-4.1.26/kernel/work
/*
* Opaque string set with work_set_desc(). Printed out with task
-@@ -68,7 +69,7 @@
+@@ -68,7 +69,7 @@ static inline struct worker *current_wq_worker(void)
* Scheduler hooks for concurrency managed workqueue. Only to be used from
* sched/core.c and workqueue.c.
*/
@@ -25716,10 +26966,23 @@ diff -Nur linux-4.1.26.orig/kernel/workqueue_internal.h linux-4.1.26/kernel/work
+void wq_worker_sleeping(struct task_struct *task);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
-diff -Nur linux-4.1.26.orig/lib/debugobjects.c linux-4.1.26/lib/debugobjects.c
---- linux-4.1.26.orig/lib/debugobjects.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/lib/debugobjects.c 2016-06-19 15:30:58.719298583 +0200
-@@ -309,7 +309,10 @@
+diff --git a/lib/Kconfig b/lib/Kconfig
+index 601965a948e8..8689649d5038 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -391,6 +391,7 @@ config CHECK_SIGNATURE
+
+ config CPUMASK_OFFSTACK
+ bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
++ depends on !PREEMPT_RT_FULL
+ help
+ Use dynamic allocation for cpumask_var_t, instead of putting
+ them on the stack. This is a bit more expensive, but avoids
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 547f7f923dbc..8fcdbc2fc6d0 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -309,7 +309,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
@@ -25731,9 +26994,10 @@ diff -Nur linux-4.1.26.orig/lib/debugobjects.c linux-4.1.26/lib/debugobjects.c
db = get_bucket((unsigned long) addr);
-diff -Nur linux-4.1.26.orig/lib/dump_stack.c linux-4.1.26/lib/dump_stack.c
---- linux-4.1.26.orig/lib/dump_stack.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/lib/dump_stack.c 2016-06-19 15:30:58.719298583 +0200
+diff --git a/lib/dump_stack.c b/lib/dump_stack.c
+index c30d07e99dba..6f2484330b50 100644
+--- a/lib/dump_stack.c
++++ b/lib/dump_stack.c
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
@@ -25742,9 +27006,10 @@ diff -Nur linux-4.1.26.orig/lib/dump_stack.c linux-4.1.26/lib/dump_stack.c
static void __dump_stack(void)
{
-diff -Nur linux-4.1.26.orig/lib/idr.c linux-4.1.26/lib/idr.c
---- linux-4.1.26.orig/lib/idr.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/lib/idr.c 2016-06-19 15:30:58.723298738 +0200
+diff --git a/lib/idr.c b/lib/idr.c
+index 5335c43adf46..d0681a357e69 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
@@ -30,6 +30,7 @@
#include <linux/idr.h>
#include <linux/spinlock.h>
@@ -25753,7 +27018,7 @@ diff -Nur linux-4.1.26.orig/lib/idr.c linux-4.1.26/lib/idr.c
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
-@@ -366,6 +367,35 @@
+@@ -366,6 +367,35 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
idr_mark_full(pa, id);
}
@@ -25789,7 +27054,7 @@ diff -Nur linux-4.1.26.orig/lib/idr.c linux-4.1.26/lib/idr.c
/**
* idr_preload - preload for idr_alloc()
-@@ -401,7 +431,7 @@
+@@ -401,7 +431,7 @@ void idr_preload(gfp_t gfp_mask)
WARN_ON_ONCE(in_interrupt());
might_sleep_if(gfp_mask & __GFP_WAIT);
@@ -25798,7 +27063,7 @@ diff -Nur linux-4.1.26.orig/lib/idr.c linux-4.1.26/lib/idr.c
/*
* idr_alloc() is likely to succeed w/o full idr_layer buffer and
-@@ -413,9 +443,9 @@
+@@ -413,9 +443,9 @@ void idr_preload(gfp_t gfp_mask)
while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
struct idr_layer *new;
@@ -25810,21 +27075,11 @@ diff -Nur linux-4.1.26.orig/lib/idr.c linux-4.1.26/lib/idr.c
if (!new)
break;
-diff -Nur linux-4.1.26.orig/lib/Kconfig linux-4.1.26/lib/Kconfig
---- linux-4.1.26.orig/lib/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/lib/Kconfig 2016-06-19 15:30:58.719298583 +0200
-@@ -391,6 +391,7 @@
-
- config CPUMASK_OFFSTACK
- bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
-+ depends on !PREEMPT_RT_FULL
- help
- Use dynamic allocation for cpumask_var_t, instead of putting
- them on the stack. This is a bit more expensive, but avoids
-diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-selftest.c
---- linux-4.1.26.orig/lib/locking-selftest.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/lib/locking-selftest.c 2016-06-19 15:30:58.723298738 +0200
-@@ -590,6 +590,8 @@
+diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
+index 872a15a2a637..b93a6103fa4d 100644
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -590,6 +590,8 @@ GENERATE_TESTCASE(init_held_rsem)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
@@ -25833,7 +27088,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
-@@ -605,9 +607,12 @@
+@@ -605,9 +607,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
@@ -25846,7 +27101,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
/*
* Enabling hardirqs with a softirq-safe lock held:
*/
-@@ -640,6 +645,8 @@
+@@ -640,6 +645,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
#undef E1
#undef E2
@@ -25855,7 +27110,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
/*
* Enabling irqs with an irq-safe lock held:
*/
-@@ -663,6 +670,8 @@
+@@ -663,6 +670,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
@@ -25864,7 +27119,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
-@@ -678,6 +687,8 @@
+@@ -678,6 +687,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
@@ -25873,7 +27128,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
#undef E1
#undef E2
-@@ -709,6 +720,8 @@
+@@ -709,6 +720,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
@@ -25882,7 +27137,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
-@@ -724,6 +737,8 @@
+@@ -724,6 +737,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
@@ -25891,7 +27146,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
#undef E1
#undef E2
#undef E3
-@@ -757,6 +772,8 @@
+@@ -757,6 +772,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
@@ -25900,7 +27155,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
-@@ -772,10 +789,14 @@
+@@ -772,10 +789,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
@@ -25915,7 +27170,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
/*
* read-lock / write-lock irq inversion.
*
-@@ -838,6 +859,10 @@
+@@ -838,6 +859,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
#undef E2
#undef E3
@@ -25926,7 +27181,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
/*
* read-lock / write-lock recursion that is actually safe.
*/
-@@ -876,6 +901,8 @@
+@@ -876,6 +901,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
#undef E2
#undef E3
@@ -25935,7 +27190,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
/*
* read-lock / write-lock recursion that is unsafe.
*/
-@@ -1858,6 +1885,7 @@
+@@ -1858,6 +1885,7 @@ void locking_selftest(void)
printk(" --------------------------------------------------------------------------\n");
@@ -25943,7 +27198,7 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
/*
* irq-context testcases:
*/
-@@ -1870,6 +1898,28 @@
+@@ -1870,6 +1898,28 @@ void locking_selftest(void)
DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
@@ -25972,9 +27227,10 @@ diff -Nur linux-4.1.26.orig/lib/locking-selftest.c linux-4.1.26/lib/locking-self
ww_tests();
-diff -Nur linux-4.1.26.orig/lib/percpu_ida.c linux-4.1.26/lib/percpu_ida.c
---- linux-4.1.26.orig/lib/percpu_ida.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/lib/percpu_ida.c 2016-06-19 15:30:58.723298738 +0200
+diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
+index f75715131f20..b1529f40865d 100644
+--- a/lib/percpu_ida.c
++++ b/lib/percpu_ida.c
@@ -26,6 +26,9 @@
#include <linux/string.h>
#include <linux/spinlock.h>
@@ -25985,7 +27241,7 @@ diff -Nur linux-4.1.26.orig/lib/percpu_ida.c linux-4.1.26/lib/percpu_ida.c
struct percpu_ida_cpu {
/*
-@@ -148,13 +151,13 @@
+@@ -148,13 +151,13 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
unsigned long flags;
int tag;
@@ -26001,7 +27257,7 @@ diff -Nur linux-4.1.26.orig/lib/percpu_ida.c linux-4.1.26/lib/percpu_ida.c
return tag;
}
-@@ -173,6 +176,7 @@
+@@ -173,6 +176,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
if (!tags->nr_free)
alloc_global_tags(pool, tags);
@@ -26009,7 +27265,7 @@ diff -Nur linux-4.1.26.orig/lib/percpu_ida.c linux-4.1.26/lib/percpu_ida.c
if (!tags->nr_free)
steal_tags(pool, tags);
-@@ -184,7 +188,7 @@
+@@ -184,7 +188,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
}
spin_unlock(&pool->lock);
@@ -26018,7 +27274,7 @@ diff -Nur linux-4.1.26.orig/lib/percpu_ida.c linux-4.1.26/lib/percpu_ida.c
if (tag >= 0 || state == TASK_RUNNING)
break;
-@@ -196,7 +200,7 @@
+@@ -196,7 +200,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
schedule();
@@ -26027,7 +27283,7 @@ diff -Nur linux-4.1.26.orig/lib/percpu_ida.c linux-4.1.26/lib/percpu_ida.c
tags = this_cpu_ptr(pool->tag_cpu);
}
if (state != TASK_RUNNING)
-@@ -221,7 +225,7 @@
+@@ -221,7 +225,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
BUG_ON(tag >= pool->nr_tags);
@@ -26036,7 +27292,7 @@ diff -Nur linux-4.1.26.orig/lib/percpu_ida.c linux-4.1.26/lib/percpu_ida.c
tags = this_cpu_ptr(pool->tag_cpu);
spin_lock(&tags->lock);
-@@ -253,7 +257,7 @@
+@@ -253,7 +257,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
spin_unlock(&pool->lock);
}
@@ -26045,7 +27301,7 @@ diff -Nur linux-4.1.26.orig/lib/percpu_ida.c linux-4.1.26/lib/percpu_ida.c
}
EXPORT_SYMBOL_GPL(percpu_ida_free);
-@@ -345,7 +349,7 @@
+@@ -345,7 +349,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
struct percpu_ida_cpu *remote;
unsigned cpu, i, err = 0;
@@ -26054,7 +27310,7 @@ diff -Nur linux-4.1.26.orig/lib/percpu_ida.c linux-4.1.26/lib/percpu_ida.c
for_each_possible_cpu(cpu) {
remote = per_cpu_ptr(pool->tag_cpu, cpu);
spin_lock(&remote->lock);
-@@ -367,7 +371,7 @@
+@@ -367,7 +371,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
}
spin_unlock(&pool->lock);
out:
@@ -26063,10 +27319,11 @@ diff -Nur linux-4.1.26.orig/lib/percpu_ida.c linux-4.1.26/lib/percpu_ida.c
return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-diff -Nur linux-4.1.26.orig/lib/radix-tree.c linux-4.1.26/lib/radix-tree.c
---- linux-4.1.26.orig/lib/radix-tree.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/lib/radix-tree.c 2016-06-19 15:30:58.723298738 +0200
-@@ -195,12 +195,13 @@
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 8399002aa0f0..19713243e698 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -195,12 +195,13 @@ radix_tree_node_alloc(struct radix_tree_root *root)
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
@@ -26081,7 +27338,7 @@ diff -Nur linux-4.1.26.orig/lib/radix-tree.c linux-4.1.26/lib/radix-tree.c
/*
* Update the allocation stack trace as this is more useful
* for debugging.
-@@ -240,6 +241,7 @@
+@@ -240,6 +241,7 @@ radix_tree_node_free(struct radix_tree_node *node)
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
@@ -26089,7 +27346,7 @@ diff -Nur linux-4.1.26.orig/lib/radix-tree.c linux-4.1.26/lib/radix-tree.c
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
-@@ -305,6 +307,7 @@
+@@ -305,6 +307,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -26097,10 +27354,11 @@ diff -Nur linux-4.1.26.orig/lib/radix-tree.c linux-4.1.26/lib/radix-tree.c
/*
* Return the maximum key which can be store into a
-diff -Nur linux-4.1.26.orig/lib/scatterlist.c linux-4.1.26/lib/scatterlist.c
---- linux-4.1.26.orig/lib/scatterlist.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/lib/scatterlist.c 2016-06-19 15:30:58.723298738 +0200
-@@ -592,7 +592,7 @@
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index c9f2e8c6ccc9..f6d1f8899dca 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -592,7 +592,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
@@ -26109,7 +27367,7 @@ diff -Nur linux-4.1.26.orig/lib/scatterlist.c linux-4.1.26/lib/scatterlist.c
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
-@@ -637,7 +637,7 @@
+@@ -637,7 +637,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
if (!sg_miter_skip(&miter, skip))
return false;
@@ -26118,7 +27376,7 @@ diff -Nur linux-4.1.26.orig/lib/scatterlist.c linux-4.1.26/lib/scatterlist.c
while (sg_miter_next(&miter) && offset < buflen) {
unsigned int len;
-@@ -654,7 +654,7 @@
+@@ -654,7 +654,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
sg_miter_stop(&miter);
@@ -26127,10 +27385,11 @@ diff -Nur linux-4.1.26.orig/lib/scatterlist.c linux-4.1.26/lib/scatterlist.c
return offset;
}
-diff -Nur linux-4.1.26.orig/lib/smp_processor_id.c linux-4.1.26/lib/smp_processor_id.c
---- linux-4.1.26.orig/lib/smp_processor_id.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/lib/smp_processor_id.c 2016-06-19 15:30:58.723298738 +0200
-@@ -39,8 +39,9 @@
+diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
+index 1afec32de6f2..11fa431046a8 100644
+--- a/lib/smp_processor_id.c
++++ b/lib/smp_processor_id.c
+@@ -39,8 +39,9 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
if (!printk_ratelimit())
goto out_enable;
@@ -26142,10 +27401,11 @@ diff -Nur linux-4.1.26.orig/lib/smp_processor_id.c linux-4.1.26/lib/smp_processo
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
-diff -Nur linux-4.1.26.orig/lib/strnlen_user.c linux-4.1.26/lib/strnlen_user.c
---- linux-4.1.26.orig/lib/strnlen_user.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/lib/strnlen_user.c 2016-06-19 15:30:58.723298738 +0200
-@@ -85,7 +85,8 @@
+diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
+index fe9a32591c24..3a5f2b366d84 100644
+--- a/lib/strnlen_user.c
++++ b/lib/strnlen_user.c
+@@ -85,7 +85,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
* @str: The string to measure.
* @count: Maximum count (including NUL character)
*
@@ -26155,7 +27415,7 @@ diff -Nur linux-4.1.26.orig/lib/strnlen_user.c linux-4.1.26/lib/strnlen_user.c
*
* Get the size of a NUL-terminated string in user space.
*
-@@ -121,7 +122,8 @@
+@@ -121,7 +122,8 @@ EXPORT_SYMBOL(strnlen_user);
* strlen_user: - Get the size of a user string INCLUDING final NUL.
* @str: The string to measure.
*
@@ -26165,10 +27425,24 @@ diff -Nur linux-4.1.26.orig/lib/strnlen_user.c linux-4.1.26/lib/strnlen_user.c
*
* Get the size of a NUL-terminated string in user space.
*
-diff -Nur linux-4.1.26.orig/mm/compaction.c linux-4.1.26/mm/compaction.c
---- linux-4.1.26.orig/mm/compaction.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/compaction.c 2016-06-19 15:30:58.823302594 +0200
-@@ -1398,10 +1398,12 @@
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 390214da4546..0cc45370563a 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -409,7 +409,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+
+ config TRANSPARENT_HUGEPAGE
+ bool "Transparent Hugepage Support"
+- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
+ select COMPACTION
+ help
+ Transparent Hugepages allows the kernel to use huge pages and
+diff --git a/mm/compaction.c b/mm/compaction.c
+index f93ada7403bf..1504b589905e 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1423,10 +1423,12 @@ check_drain:
cc->migrate_pfn & ~((1UL << cc->order) - 1);
if (last_migrated_pfn < current_block_start) {
@@ -26183,10 +27457,11 @@ diff -Nur linux-4.1.26.orig/mm/compaction.c linux-4.1.26/mm/compaction.c
/* No more flushing until we migrate again */
last_migrated_pfn = 0;
}
-diff -Nur linux-4.1.26.orig/mm/filemap.c linux-4.1.26/mm/filemap.c
---- linux-4.1.26.orig/mm/filemap.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/filemap.c 2016-06-19 15:30:58.823302594 +0200
-@@ -167,7 +167,9 @@
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 1ffef05f1c1f..7d4fa2bf6ac2 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -167,7 +167,9 @@ static void page_cache_tree_delete(struct address_space *mapping,
if (!workingset_node_pages(node) &&
list_empty(&node->private_list)) {
node->private_data = mapping;
@@ -26197,7 +27472,7 @@ diff -Nur linux-4.1.26.orig/mm/filemap.c linux-4.1.26/mm/filemap.c
}
}
-@@ -533,9 +535,12 @@
+@@ -533,9 +535,12 @@ static int page_cache_tree_insert(struct address_space *mapping,
* node->private_list is protected by
* mapping->tree_lock.
*/
@@ -26212,9 +27487,10 @@ diff -Nur linux-4.1.26.orig/mm/filemap.c linux-4.1.26/mm/filemap.c
}
return 0;
}
-diff -Nur linux-4.1.26.orig/mm/highmem.c linux-4.1.26/mm/highmem.c
---- linux-4.1.26.orig/mm/highmem.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/highmem.c 2016-06-19 15:30:58.823302594 +0200
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 123bcd3ed4f2..16e8cf26d38a 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
@@ -29,10 +29,11 @@
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
@@ -26228,7 +27504,7 @@ diff -Nur linux-4.1.26.orig/mm/highmem.c linux-4.1.26/mm/highmem.c
/*
* Virtual_count is not a pure "count".
-@@ -107,8 +108,9 @@
+@@ -107,8 +108,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);
@@ -26239,21 +27515,10 @@ diff -Nur linux-4.1.26.orig/mm/highmem.c linux-4.1.26/mm/highmem.c
unsigned int nr_free_highpages (void)
{
-diff -Nur linux-4.1.26.orig/mm/Kconfig linux-4.1.26/mm/Kconfig
---- linux-4.1.26.orig/mm/Kconfig 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/Kconfig 2016-06-19 15:30:58.823302594 +0200
-@@ -409,7 +409,7 @@
-
- config TRANSPARENT_HUGEPAGE
- bool "Transparent Hugepage Support"
-- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
-+ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
- select COMPACTION
- help
- Transparent Hugepages allows the kernel to use huge pages and
-diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
---- linux-4.1.26.orig/mm/memcontrol.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/memcontrol.c 2016-06-19 15:30:58.827302748 +0200
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 221762e24a68..cefa875a4320 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
@@ -66,6 +66,8 @@
#include <net/sock.h>
#include <net/ip.h>
@@ -26263,7 +27528,7 @@ diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
#include "slab.h"
#include <asm/uaccess.h>
-@@ -85,6 +87,7 @@
+@@ -85,6 +87,7 @@ int do_swap_account __read_mostly;
#define do_swap_account 0
#endif
@@ -26271,7 +27536,7 @@ diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
static const char * const mem_cgroup_stat_names[] = {
"cache",
"rss",
-@@ -2124,14 +2127,17 @@
+@@ -2124,14 +2127,17 @@ static void drain_local_stock(struct work_struct *dummy)
*/
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
@@ -26291,7 +27556,7 @@ diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
}
/*
-@@ -2147,7 +2153,7 @@
+@@ -2147,7 +2153,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
return;
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
@@ -26300,7 +27565,7 @@ diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -2164,7 +2170,7 @@
+@@ -2164,7 +2170,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
schedule_work_on(cpu, &stock->work);
}
}
@@ -26309,7 +27574,7 @@ diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
put_online_cpus();
mutex_unlock(&percpu_charge_mutex);
}
-@@ -4803,12 +4809,12 @@
+@@ -4803,12 +4809,12 @@ static int mem_cgroup_move_account(struct page *page,
ret = 0;
@@ -26324,7 +27589,7 @@ diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
out_unlock:
unlock_page(page);
out:
-@@ -5551,10 +5557,10 @@
+@@ -5551,10 +5557,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
}
@@ -26337,7 +27602,7 @@ diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5610,14 +5616,14 @@
+@@ -5610,14 +5616,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
memcg_oom_recover(memcg);
}
@@ -26354,7 +27619,7 @@ diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5821,6 +5827,7 @@
+@@ -5821,6 +5827,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
struct mem_cgroup *memcg;
unsigned short oldid;
@@ -26362,7 +27627,7 @@ diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5843,9 +5850,11 @@
+@@ -5843,9 +5850,11 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
if (!mem_cgroup_is_root(memcg))
page_counter_uncharge(&memcg->memory, 1);
@@ -26374,10 +27639,11 @@ diff -Nur linux-4.1.26.orig/mm/memcontrol.c linux-4.1.26/mm/memcontrol.c
}
/**
-diff -Nur linux-4.1.26.orig/mm/memory.c linux-4.1.26/mm/memory.c
---- linux-4.1.26.orig/mm/memory.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/memory.c 2016-06-19 15:30:58.827302748 +0200
-@@ -3753,7 +3753,7 @@
+diff --git a/mm/memory.c b/mm/memory.c
+index 701d9ad45c46..3456e24cce4f 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3753,7 +3753,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
}
#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
@@ -26386,7 +27652,7 @@ diff -Nur linux-4.1.26.orig/mm/memory.c linux-4.1.26/mm/memory.c
{
/*
* Some code (nfs/sunrpc) uses socket ops on kernel memory while
-@@ -3763,21 +3763,15 @@
+@@ -3763,21 +3763,15 @@ void might_fault(void)
*/
if (segment_eq(get_fs(), KERNEL_DS))
return;
@@ -26413,10 +27679,11 @@ diff -Nur linux-4.1.26.orig/mm/memory.c linux-4.1.26/mm/memory.c
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-diff -Nur linux-4.1.26.orig/mm/mmu_context.c linux-4.1.26/mm/mmu_context.c
---- linux-4.1.26.orig/mm/mmu_context.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/mmu_context.c 2016-06-19 15:30:58.827302748 +0200
-@@ -23,6 +23,7 @@
+diff --git a/mm/mmu_context.c b/mm/mmu_context.c
+index f802c2d216a7..b1b6f238e42d 100644
+--- a/mm/mmu_context.c
++++ b/mm/mmu_context.c
+@@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm)
struct task_struct *tsk = current;
task_lock(tsk);
@@ -26424,7 +27691,7 @@ diff -Nur linux-4.1.26.orig/mm/mmu_context.c linux-4.1.26/mm/mmu_context.c
active_mm = tsk->active_mm;
if (active_mm != mm) {
atomic_inc(&mm->mm_count);
-@@ -30,6 +31,7 @@
+@@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm)
}
tsk->mm = mm;
switch_mm(active_mm, mm, tsk);
@@ -26432,9 +27699,10 @@ diff -Nur linux-4.1.26.orig/mm/mmu_context.c linux-4.1.26/mm/mmu_context.c
task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
finish_arch_post_lock_switch();
-diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
---- linux-4.1.26.orig/mm/page_alloc.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/page_alloc.c 2016-06-19 15:30:58.827302748 +0200
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index f6f6831cec52..c6f829a374c1 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
@@ -60,6 +60,7 @@
#include <linux/page_ext.h>
#include <linux/hugetlb.h>
@@ -26443,7 +27711,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
#include <linux/page_owner.h>
#include <asm/sections.h>
-@@ -233,6 +234,18 @@
+@@ -233,6 +234,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
@@ -26462,7 +27730,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
int page_group_by_mobility_disabled __read_mostly;
void set_pageblock_migratetype(struct page *page, int migratetype)
-@@ -701,7 +714,7 @@
+@@ -701,7 +714,7 @@ static inline int free_pages_check(struct page *page)
}
/*
@@ -26471,7 +27739,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -712,18 +725,51 @@
+@@ -712,18 +725,51 @@ static inline int free_pages_check(struct page *page)
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -26527,7 +27795,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
while (to_free) {
struct page *page;
struct list_head *list;
-@@ -739,7 +785,7 @@
+@@ -739,7 +785,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
@@ -26536,7 +27804,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
-@@ -747,21 +793,11 @@
+@@ -747,21 +793,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
batch_free = to_free;
do {
@@ -26560,7 +27828,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
}
static void free_one_page(struct zone *zone,
-@@ -770,7 +806,9 @@
+@@ -770,7 +806,9 @@ static void free_one_page(struct zone *zone,
int migratetype)
{
unsigned long nr_scanned;
@@ -26571,7 +27839,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
if (nr_scanned)
__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
-@@ -780,7 +818,7 @@
+@@ -780,7 +818,7 @@ static void free_one_page(struct zone *zone,
migratetype = get_pfnblock_migratetype(page, pfn);
}
__free_one_page(page, pfn, zone, order, migratetype);
@@ -26580,7 +27848,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
}
static int free_tail_pages_check(struct page *head_page, struct page *page)
-@@ -845,11 +883,11 @@
+@@ -845,11 +883,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -26594,7 +27862,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
}
void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
-@@ -1396,16 +1434,18 @@
+@@ -1396,16 +1434,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
@@ -26616,7 +27884,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
}
#endif
-@@ -1421,16 +1461,21 @@
+@@ -1421,16 +1461,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -26642,7 +27910,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
}
/*
-@@ -1516,8 +1561,17 @@
+@@ -1516,8 +1561,17 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -26660,7 +27928,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
}
#ifdef CONFIG_HIBERNATION
-@@ -1573,7 +1627,7 @@
+@@ -1573,7 +1627,7 @@ void free_hot_cold_page(struct page *page, bool cold)
migratetype = get_pfnblock_migratetype(page, pfn);
set_freepage_migratetype(page, migratetype);
@@ -26669,7 +27937,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
__count_vm_event(PGFREE);
/*
-@@ -1599,12 +1653,17 @@
+@@ -1599,12 +1653,17 @@ void free_hot_cold_page(struct page *page, bool cold)
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -26689,7 +27957,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
}
/*
-@@ -1735,7 +1794,7 @@
+@@ -1735,7 +1794,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
struct per_cpu_pages *pcp;
struct list_head *list;
@@ -26698,7 +27966,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
-@@ -1767,13 +1826,15 @@
+@@ -1767,13 +1826,15 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
*/
WARN_ON_ONCE(order > 1);
}
@@ -26717,7 +27985,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-@@ -1783,13 +1844,13 @@
+@@ -1783,13 +1844,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
@@ -26733,7 +28001,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
return NULL;
}
-@@ -5680,6 +5741,7 @@
+@@ -5680,6 +5741,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -26741,7 +28009,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
}
/*
-@@ -6575,7 +6637,7 @@
+@@ -6575,7 +6637,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -26750,7 +28018,7 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -6584,7 +6646,7 @@
+@@ -6584,7 +6646,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
@@ -26759,10 +28027,11 @@ diff -Nur linux-4.1.26.orig/mm/page_alloc.c linux-4.1.26/mm/page_alloc.c
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-diff -Nur linux-4.1.26.orig/mm/slab.h linux-4.1.26/mm/slab.h
---- linux-4.1.26.orig/mm/slab.h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/slab.h 2016-06-19 15:30:58.827302748 +0200
-@@ -330,7 +330,11 @@
+diff --git a/mm/slab.h b/mm/slab.h
+index 4c3ac12dd644..0c9bda0eb0c1 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -330,7 +330,11 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
* The slab lists for all objects.
*/
struct kmem_cache_node {
@@ -26774,10 +28043,11 @@ diff -Nur linux-4.1.26.orig/mm/slab.h linux-4.1.26/mm/slab.h
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
-diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
---- linux-4.1.26.orig/mm/slub.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/slub.c 2016-06-19 15:30:58.827302748 +0200
-@@ -1069,7 +1069,7 @@
+diff --git a/mm/slub.c b/mm/slub.c
+index 08342c523a85..905e283d7829 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1069,7 +1069,7 @@ static noinline struct kmem_cache_node *free_debug_processing(
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -26786,7 +28056,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
slab_lock(page);
if (!check_slab(s, page))
-@@ -1116,7 +1116,7 @@
+@@ -1116,7 +1116,7 @@ out:
fail:
slab_unlock(page);
@@ -26795,7 +28065,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
slab_fix(s, "Object at 0x%p not freed", object);
return NULL;
}
-@@ -1242,6 +1242,12 @@
+@@ -1242,6 +1242,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
#endif /* CONFIG_SLUB_DEBUG */
@@ -26808,7 +28078,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1306,6 +1312,17 @@
+@@ -1306,6 +1312,17 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
kasan_slab_free(s, x);
}
@@ -26826,7 +28096,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
/*
* Slab allocation and freeing
*/
-@@ -1336,10 +1353,17 @@
+@@ -1336,10 +1353,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;
@@ -26845,7 +28115,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
local_irq_enable();
flags |= s->allocflags;
-@@ -1359,13 +1383,13 @@
+@@ -1359,13 +1383,13 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
* Try a lower order alloc if possible
*/
page = alloc_slab_page(s, alloc_gfp, node, oo);
@@ -26864,7 +28134,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
int pages = 1 << oo_order(oo);
kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
-@@ -1380,51 +1404,9 @@
+@@ -1380,51 +1404,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
kmemcheck_mark_unallocated_pages(page, pages);
}
@@ -26916,7 +28186,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
page->slab_cache = s;
__SetPageSlab(page);
if (page_is_pfmemalloc(page))
-@@ -1448,10 +1430,34 @@
+@@ -1448,10 +1430,34 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
page->freelist = start;
page->inuse = page->objects;
page->frozen = 1;
@@ -26951,7 +28221,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
static void __free_slab(struct kmem_cache *s, struct page *page)
{
int order = compound_order(page);
-@@ -1483,6 +1489,16 @@
+@@ -1483,6 +1489,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
memcg_uncharge_slab(s, order);
}
@@ -26968,7 +28238,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-@@ -1517,6 +1533,12 @@
+@@ -1517,6 +1533,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
}
call_rcu(head, rcu_free_slab);
@@ -26981,7 +28251,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
} else
__free_slab(s, page);
}
-@@ -1630,7 +1652,7 @@
+@@ -1630,7 +1652,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
if (!n || !n->nr_partial)
return NULL;
@@ -26990,7 +28260,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
-@@ -1655,7 +1677,7 @@
+@@ -1655,7 +1677,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
break;
}
@@ -26999,7 +28269,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
return object;
}
-@@ -1901,7 +1923,7 @@
+@@ -1901,7 +1923,7 @@ redo:
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -27008,7 +28278,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
}
} else {
m = M_FULL;
-@@ -1912,7 +1934,7 @@
+@@ -1912,7 +1934,7 @@ redo:
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -27017,7 +28287,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
}
}
-@@ -1947,7 +1969,7 @@
+@@ -1947,7 +1969,7 @@ redo:
goto redo;
if (lock)
@@ -27026,7 +28296,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
-@@ -1979,10 +2001,10 @@
+@@ -1979,10 +2001,10 @@ static void unfreeze_partials(struct kmem_cache *s,
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -27039,7 +28309,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
}
do {
-@@ -2011,7 +2033,7 @@
+@@ -2011,7 +2033,7 @@ static void unfreeze_partials(struct kmem_cache *s,
}
if (n)
@@ -27048,7 +28318,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
while (discard_page) {
page = discard_page;
-@@ -2050,14 +2072,21 @@
+@@ -2050,14 +2072,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
@@ -27070,7 +28340,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2129,7 +2158,22 @@
+@@ -2129,7 +2158,22 @@ static bool has_cpu_slab(int cpu, void *info)
static void flush_all(struct kmem_cache *s)
{
@@ -27093,7 +28363,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
}
/*
-@@ -2165,10 +2209,10 @@
+@@ -2165,10 +2209,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
unsigned long x = 0;
struct page *page;
@@ -27106,7 +28376,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2305,9 +2349,11 @@
+@@ -2305,9 +2349,11 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
@@ -27118,7 +28388,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
-@@ -2375,7 +2421,13 @@
+@@ -2375,7 +2421,13 @@ load_freelist:
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -27132,7 +28402,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
return freelist;
new_slab:
-@@ -2392,8 +2444,7 @@
+@@ -2392,8 +2444,7 @@ new_slab:
if (unlikely(!freelist)) {
slab_out_of_memory(s, gfpflags, node);
@@ -27142,7 +28412,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
}
page = c->page;
-@@ -2408,8 +2459,7 @@
+@@ -2408,8 +2459,7 @@ new_slab:
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
@@ -27152,7 +28422,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
}
/*
-@@ -2593,7 +2643,7 @@
+@@ -2593,7 +2643,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
do {
if (unlikely(n)) {
@@ -27161,7 +28431,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
n = NULL;
}
prior = page->freelist;
-@@ -2625,7 +2675,7 @@
+@@ -2625,7 +2675,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -27170,7 +28440,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
}
}
-@@ -2667,7 +2717,7 @@
+@@ -2667,7 +2717,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -27179,7 +28449,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
return;
slab_empty:
-@@ -2682,7 +2732,7 @@
+@@ -2682,7 +2732,7 @@ slab_empty:
remove_full(s, n, page);
}
@@ -27188,7 +28458,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -2881,7 +2931,7 @@
+@@ -2881,7 +2931,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -27197,7 +28467,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3463,7 +3513,7 @@
+@@ -3463,7 +3513,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -27206,7 +28476,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
/*
* Build lists of slabs to discard or promote.
-@@ -3494,7 +3544,7 @@
+@@ -3494,7 +3544,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -27215,7 +28485,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -3670,6 +3720,12 @@
+@@ -3670,6 +3720,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
@@ -27228,7 +28498,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
if (debug_guardpage_minorder())
slub_max_order = 0;
-@@ -3912,7 +3968,7 @@
+@@ -3912,7 +3968,7 @@ static int validate_slab_node(struct kmem_cache *s,
struct page *page;
unsigned long flags;
@@ -27237,7 +28507,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -3934,7 +3990,7 @@
+@@ -3934,7 +3990,7 @@ static int validate_slab_node(struct kmem_cache *s,
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -27246,7 +28516,7 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
return count;
}
-@@ -4122,12 +4178,12 @@
+@@ -4122,12 +4178,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
if (!atomic_long_read(&n->nr_slabs))
continue;
@@ -27261,9 +28531,10 @@ diff -Nur linux-4.1.26.orig/mm/slub.c linux-4.1.26/mm/slub.c
}
for (i = 0; i < t.count; i++) {
-diff -Nur linux-4.1.26.orig/mm/swap.c linux-4.1.26/mm/swap.c
---- linux-4.1.26.orig/mm/swap.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/swap.c 2016-06-19 15:30:58.831302903 +0200
+diff --git a/mm/swap.c b/mm/swap.c
+index ab3b9c2dd783..b433019229f8 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
@@ -32,6 +32,7 @@
#include <linux/gfp.h>
#include <linux/uio.h>
@@ -27272,7 +28543,7 @@ diff -Nur linux-4.1.26.orig/mm/swap.c linux-4.1.26/mm/swap.c
#include "internal.h"
-@@ -45,6 +46,9 @@
+@@ -45,6 +46,9 @@ static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
@@ -27282,21 +28553,21 @@ diff -Nur linux-4.1.26.orig/mm/swap.c linux-4.1.26/mm/swap.c
/*
* This path almost never happens for VM activity - pages are normally
* freed via pagevecs. But it gets used by networking.
-@@ -481,11 +485,11 @@
+@@ -481,11 +485,11 @@ void rotate_reclaimable_page(struct page *page)
unsigned long flags;
page_cache_get(page);
- local_irq_save(flags);
+ local_lock_irqsave(rotate_lock, flags);
pvec = this_cpu_ptr(&lru_rotate_pvecs);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_move_tail(pvec);
- local_irq_restore(flags);
+ local_unlock_irqrestore(rotate_lock, flags);
}
}
-@@ -536,12 +540,13 @@
+@@ -536,12 +540,13 @@ static bool need_activate_page_drain(int cpu)
void activate_page(struct page *page)
{
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -27305,14 +28576,14 @@ diff -Nur linux-4.1.26.orig/mm/swap.c linux-4.1.26/mm/swap.c
+ activate_page_pvecs);
page_cache_get(page);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, __activate_page, NULL);
- put_cpu_var(activate_page_pvecs);
+ put_locked_var(swapvec_lock, activate_page_pvecs);
}
}
-@@ -567,7 +572,7 @@
+@@ -567,7 +572,7 @@ void activate_page(struct page *page)
static void __lru_cache_activate_page(struct page *page)
{
@@ -27321,7 +28592,7 @@ diff -Nur linux-4.1.26.orig/mm/swap.c linux-4.1.26/mm/swap.c
int i;
/*
-@@ -589,7 +594,7 @@
+@@ -589,7 +594,7 @@ static void __lru_cache_activate_page(struct page *page)
}
}
@@ -27330,7 +28601,7 @@ diff -Nur linux-4.1.26.orig/mm/swap.c linux-4.1.26/mm/swap.c
}
/*
-@@ -628,13 +633,13 @@
+@@ -628,12 +633,12 @@ EXPORT_SYMBOL(mark_page_accessed);
static void __lru_cache_add(struct page *page)
{
@@ -27338,27 +28609,32 @@ diff -Nur linux-4.1.26.orig/mm/swap.c linux-4.1.26/mm/swap.c
+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
page_cache_get(page);
- if (!pagevec_space(pvec))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
__pagevec_lru_add(pvec);
- pagevec_add(pvec, page);
- put_cpu_var(lru_add_pvec);
+ put_locked_var(swapvec_lock, lru_add_pvec);
}
/**
-@@ -814,9 +819,9 @@
+@@ -813,9 +818,15 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
- local_irq_save(flags);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ local_lock_irqsave_on(rotate_lock, flags, cpu);
++ pagevec_move_tail(pvec);
++ local_unlock_irqrestore_on(rotate_lock, flags, cpu);
++#else
+ local_lock_irqsave(rotate_lock, flags);
pagevec_move_tail(pvec);
- local_irq_restore(flags);
+ local_unlock_irqrestore(rotate_lock, flags);
++#endif
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -844,18 +849,19 @@
+@@ -843,26 +854,47 @@ void deactivate_file_page(struct page *page)
return;
if (likely(get_page_unless_zero(page))) {
@@ -27366,7 +28642,7 @@ diff -Nur linux-4.1.26.orig/mm/swap.c linux-4.1.26/mm/swap.c
+ struct pagevec *pvec = &get_locked_var(swapvec_lock,
+ lru_deactivate_file_pvecs);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
- put_cpu_var(lru_deactivate_file_pvecs);
+ put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
@@ -27381,11 +28657,65 @@ diff -Nur linux-4.1.26.orig/mm/swap.c linux-4.1.26/mm/swap.c
+ local_unlock_cpu(swapvec_lock);
}
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
++{
++ local_lock_on(swapvec_lock, cpu);
++ lru_add_drain_cpu(cpu);
++ local_unlock_on(swapvec_lock, cpu);
++}
++
++#else
++
static void lru_add_drain_per_cpu(struct work_struct *dummy)
-diff -Nur linux-4.1.26.orig/mm/truncate.c linux-4.1.26/mm/truncate.c
---- linux-4.1.26.orig/mm/truncate.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/truncate.c 2016-06-19 15:30:58.831302903 +0200
-@@ -56,8 +56,11 @@
+ {
+ lru_add_drain();
+ }
+
+ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
++static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
++{
++ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
++
++ INIT_WORK(work, lru_add_drain_per_cpu);
++ schedule_work_on(cpu, work);
++ cpumask_set_cpu(cpu, has_work);
++}
++#endif
+
+ void lru_add_drain_all(void)
+ {
+@@ -875,20 +907,17 @@ void lru_add_drain_all(void)
+ cpumask_clear(&has_work);
+
+ for_each_online_cpu(cpu) {
+- struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+-
+ if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
+ pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
+ pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
+- need_activate_page_drain(cpu)) {
+- INIT_WORK(work, lru_add_drain_per_cpu);
+- schedule_work_on(cpu, work);
+- cpumask_set_cpu(cpu, &has_work);
+- }
++ need_activate_page_drain(cpu))
++ remote_lru_add_drain(cpu, &has_work);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_BASE
+ for_each_cpu(cpu, &has_work)
+ flush_work(&per_cpu(lru_add_drain_work, cpu));
++#endif
+
+ put_online_cpus();
+ mutex_unlock(&lock);
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 66af9031fae8..09598db42681 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -56,8 +56,11 @@ static void clear_exceptional_entry(struct address_space *mapping,
* protected by mapping->tree_lock.
*/
if (!workingset_node_shadows(node) &&
@@ -27399,10 +28729,11 @@ diff -Nur linux-4.1.26.orig/mm/truncate.c linux-4.1.26/mm/truncate.c
__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
spin_unlock_irq(&mapping->tree_lock);
-diff -Nur linux-4.1.26.orig/mm/vmalloc.c linux-4.1.26/mm/vmalloc.c
---- linux-4.1.26.orig/mm/vmalloc.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/vmalloc.c 2016-06-19 15:30:58.831302903 +0200
-@@ -819,7 +819,7 @@
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 2faaa2976447..f87a29f1e718 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -819,7 +819,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -27411,7 +28742,7 @@ diff -Nur linux-4.1.26.orig/mm/vmalloc.c linux-4.1.26/mm/vmalloc.c
void *vaddr;
node = numa_node_id();
-@@ -862,11 +862,12 @@
+@@ -862,11 +862,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
BUG_ON(err);
radix_tree_preload_end();
@@ -27426,7 +28757,7 @@ diff -Nur linux-4.1.26.orig/mm/vmalloc.c linux-4.1.26/mm/vmalloc.c
return vaddr;
}
-@@ -935,6 +936,7 @@
+@@ -935,6 +936,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@@ -27434,7 +28765,7 @@ diff -Nur linux-4.1.26.orig/mm/vmalloc.c linux-4.1.26/mm/vmalloc.c
BUG_ON(size & ~PAGE_MASK);
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -949,7 +951,8 @@
+@@ -949,7 +951,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
order = get_order(size);
rcu_read_lock();
@@ -27444,7 +28775,7 @@ diff -Nur linux-4.1.26.orig/mm/vmalloc.c linux-4.1.26/mm/vmalloc.c
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -972,7 +975,7 @@
+@@ -972,7 +975,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
break;
}
@@ -27453,10 +28784,11 @@ diff -Nur linux-4.1.26.orig/mm/vmalloc.c linux-4.1.26/mm/vmalloc.c
rcu_read_unlock();
/* Allocate new block if nothing was found */
-diff -Nur linux-4.1.26.orig/mm/vmstat.c linux-4.1.26/mm/vmstat.c
---- linux-4.1.26.orig/mm/vmstat.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/vmstat.c 2016-06-19 15:30:58.831302903 +0200
-@@ -226,6 +226,7 @@
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 4f5cd974e11a..86f0e2e3f677 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -226,6 +226,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
long x;
long t;
@@ -27464,7 +28796,7 @@ diff -Nur linux-4.1.26.orig/mm/vmstat.c linux-4.1.26/mm/vmstat.c
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
-@@ -235,6 +236,7 @@
+@@ -235,6 +236,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
x = 0;
}
__this_cpu_write(*p, x);
@@ -27472,7 +28804,7 @@ diff -Nur linux-4.1.26.orig/mm/vmstat.c linux-4.1.26/mm/vmstat.c
}
EXPORT_SYMBOL(__mod_zone_page_state);
-@@ -267,6 +269,7 @@
+@@ -267,6 +269,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
@@ -27480,7 +28812,7 @@ diff -Nur linux-4.1.26.orig/mm/vmstat.c linux-4.1.26/mm/vmstat.c
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
-@@ -275,6 +278,7 @@
+@@ -275,6 +278,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
@@ -27488,7 +28820,7 @@ diff -Nur linux-4.1.26.orig/mm/vmstat.c linux-4.1.26/mm/vmstat.c
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-@@ -289,6 +293,7 @@
+@@ -289,6 +293,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
@@ -27496,7 +28828,7 @@ diff -Nur linux-4.1.26.orig/mm/vmstat.c linux-4.1.26/mm/vmstat.c
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
-@@ -297,6 +302,7 @@
+@@ -297,6 +302,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
@@ -27504,10 +28836,11 @@ diff -Nur linux-4.1.26.orig/mm/vmstat.c linux-4.1.26/mm/vmstat.c
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
-diff -Nur linux-4.1.26.orig/mm/workingset.c linux-4.1.26/mm/workingset.c
---- linux-4.1.26.orig/mm/workingset.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/mm/workingset.c 2016-06-19 15:30:58.835303057 +0200
-@@ -264,7 +264,8 @@
+diff --git a/mm/workingset.c b/mm/workingset.c
+index aa017133744b..263d0194734a 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -264,7 +264,8 @@ void workingset_activation(struct page *page)
* point where they would still be useful.
*/
@@ -27517,7 +28850,7 @@ diff -Nur linux-4.1.26.orig/mm/workingset.c linux-4.1.26/mm/workingset.c
static unsigned long count_shadow_nodes(struct shrinker *shrinker,
struct shrink_control *sc)
-@@ -274,9 +275,9 @@
+@@ -274,9 +275,9 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
unsigned long pages;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
@@ -27530,7 +28863,7 @@ diff -Nur linux-4.1.26.orig/mm/workingset.c linux-4.1.26/mm/workingset.c
pages = node_present_pages(sc->nid);
/*
-@@ -363,9 +364,9 @@
+@@ -363,9 +364,9 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
spin_unlock(&mapping->tree_lock);
ret = LRU_REMOVED_RETRY;
out:
@@ -27542,7 +28875,7 @@ diff -Nur linux-4.1.26.orig/mm/workingset.c linux-4.1.26/mm/workingset.c
spin_lock(lru_lock);
return ret;
}
-@@ -376,10 +377,10 @@
+@@ -376,10 +377,10 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
unsigned long ret;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
@@ -27556,7 +28889,7 @@ diff -Nur linux-4.1.26.orig/mm/workingset.c linux-4.1.26/mm/workingset.c
return ret;
}
-@@ -400,7 +401,7 @@
+@@ -400,7 +401,7 @@ static int __init workingset_init(void)
{
int ret;
@@ -27565,7 +28898,7 @@ diff -Nur linux-4.1.26.orig/mm/workingset.c linux-4.1.26/mm/workingset.c
if (ret)
goto err;
ret = register_shrinker(&workingset_shadow_shrinker);
-@@ -408,7 +409,7 @@
+@@ -408,7 +409,7 @@ static int __init workingset_init(void)
goto err_list_lru;
return 0;
err_list_lru:
@@ -27574,10 +28907,33 @@ diff -Nur linux-4.1.26.orig/mm/workingset.c linux-4.1.26/mm/workingset.c
err:
return ret;
}
-diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
---- linux-4.1.26.orig/net/core/dev.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/core/dev.c 2016-06-19 15:30:58.839303211 +0200
-@@ -184,6 +184,7 @@
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index fb1ec10ce449..e819dffd142c 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -1289,7 +1289,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ class = pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+- area = &get_cpu_var(zs_map_area);
++ area = per_cpu_ptr(&zs_map_area, get_cpu_light());
+ area->vm_mm = mm;
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+@@ -1342,7 +1342,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
+
+ __zs_unmap_object(area, pages, off, class->size);
+ }
+- put_cpu_var(zs_map_area);
++ put_cpu_light();
+ unpin_tag(handle);
+ }
+ EXPORT_SYMBOL_GPL(zs_unmap_object);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 185a3398c651..78912da59fc1 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -184,6 +184,7 @@ static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
@@ -27585,7 +28941,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
static inline void dev_base_seq_inc(struct net *net)
{
-@@ -205,14 +206,14 @@
+@@ -205,14 +206,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
@@ -27602,7 +28958,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
#endif
}
-@@ -852,7 +853,8 @@
+@@ -852,7 +853,8 @@ retry:
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
@@ -27612,7 +28968,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
goto retry;
}
-@@ -1121,20 +1123,17 @@
+@@ -1121,20 +1123,17 @@ int dev_change_name(struct net_device *dev, const char *newname)
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -27639,7 +28995,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1147,11 +1146,12 @@
+@@ -1147,11 +1146,12 @@ rollback:
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
@@ -27655,7 +29011,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
netdev_adjacent_rename_links(dev, oldname);
-@@ -1172,7 +1172,8 @@
+@@ -1172,7 +1172,8 @@ rollback:
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
@@ -27665,7 +29021,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
-@@ -1185,6 +1186,11 @@
+@@ -1185,6 +1186,11 @@ rollback:
}
return err;
@@ -27677,7 +29033,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
}
/**
-@@ -2214,6 +2220,7 @@
+@@ -2214,6 +2220,7 @@ static inline void __netif_reschedule(struct Qdisc *q)
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -27685,7 +29041,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
}
void __netif_schedule(struct Qdisc *q)
-@@ -2295,6 +2302,7 @@
+@@ -2295,6 +2302,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -27693,7 +29049,19 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -2880,9 +2888,44 @@
+@@ -2820,7 +2828,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+ * This permits __QDISC___STATE_RUNNING owner to get the lock more
+ * often and dequeue packets faster.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ contended = true;
++#else
+ contended = qdisc_is_running(q);
++#endif
+ if (unlikely(contended))
+ spin_lock(&q->busylock);
+
+@@ -2880,9 +2892,44 @@ static void skb_update_prio(struct sk_buff *skb)
#define skb_update_prio(skb)
#endif
@@ -27738,7 +29106,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
#define RECURSION_LIMIT 10
/**
-@@ -2984,7 +3027,7 @@
+@@ -2984,7 +3031,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
if (txq->xmit_lock_owner != cpu) {
@@ -27747,7 +29115,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
goto recursion_alert;
skb = validate_xmit_skb(skb, dev);
-@@ -2994,9 +3037,9 @@
+@@ -2994,9 +3041,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
@@ -27759,7 +29127,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
-@@ -3370,6 +3413,7 @@
+@@ -3370,6 +3417,7 @@ drop:
rps_unlock(sd);
local_irq_restore(flags);
@@ -27767,7 +29135,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -3388,7 +3432,7 @@
+@@ -3388,7 +3436,7 @@ static int netif_rx_internal(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -27776,7 +29144,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3398,13 +3442,13 @@
+@@ -3398,13 +3446,13 @@ static int netif_rx_internal(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
@@ -27793,7 +29161,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
}
return ret;
}
-@@ -3438,16 +3482,44 @@
+@@ -3438,16 +3486,44 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
@@ -27842,7 +29210,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -3489,7 +3561,7 @@
+@@ -3489,7 +3565,7 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;
root_lock = qdisc_lock(q);
@@ -27851,7 +29219,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
smp_mb__before_atomic();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
-@@ -3886,7 +3958,7 @@
+@@ -3886,7 +3962,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -27860,7 +29228,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
input_queue_head_incr(sd);
}
}
-@@ -3895,10 +3967,13 @@
+@@ -3895,10 +3971,13 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
@@ -27875,7 +29243,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
}
static int napi_gro_complete(struct sk_buff *skb)
-@@ -4349,6 +4424,7 @@
+@@ -4349,6 +4428,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -27883,7 +29251,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
-@@ -4362,6 +4438,7 @@
+@@ -4362,6 +4442,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
} else
#endif
local_irq_enable();
@@ -27891,7 +29259,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4443,6 +4520,7 @@
+@@ -4443,6 +4524,7 @@ void __napi_schedule(struct napi_struct *n)
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -27899,7 +29267,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -4717,7 +4795,7 @@
+@@ -4717,7 +4799,7 @@ static void net_rx_action(struct softirq_action *h)
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
@@ -27908,7 +29276,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
net_rps_action_and_irq_enable(sd);
}
-@@ -6931,7 +7009,7 @@
+@@ -6931,7 +7013,7 @@ EXPORT_SYMBOL(free_netdev);
void synchronize_net(void)
{
might_sleep();
@@ -27917,7 +29285,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
synchronize_rcu_expedited();
else
synchronize_rcu();
-@@ -7172,16 +7250,20 @@
+@@ -7172,16 +7254,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
@@ -27939,7 +29307,7 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
return NOTIFY_OK;
}
-@@ -7483,8 +7565,9 @@
+@@ -7483,8 +7569,9 @@ static int __init net_dev_init(void)
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
@@ -27951,9 +29319,10 @@ diff -Nur linux-4.1.26.orig/net/core/dev.c linux-4.1.26/net/core/dev.c
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
-diff -Nur linux-4.1.26.orig/net/core/skbuff.c linux-4.1.26/net/core/skbuff.c
---- linux-4.1.26.orig/net/core/skbuff.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/core/skbuff.c 2016-06-19 15:30:58.839303211 +0200
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c9793c6c5005..d4516d7d58aa 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
@@ -63,6 +63,7 @@
#include <linux/errqueue.h>
#include <linux/prefetch.h>
@@ -27962,7 +29331,7 @@ diff -Nur linux-4.1.26.orig/net/core/skbuff.c linux-4.1.26/net/core/skbuff.c
#include <net/protocol.h>
#include <net/dst.h>
-@@ -358,6 +359,8 @@
+@@ -358,6 +359,8 @@ struct netdev_alloc_cache {
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
@@ -27971,7 +29340,7 @@ diff -Nur linux-4.1.26.orig/net/core/skbuff.c linux-4.1.26/net/core/skbuff.c
static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
gfp_t gfp_mask)
-@@ -435,9 +438,9 @@
+@@ -435,9 +438,9 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
unsigned long flags;
void *data;
@@ -27983,7 +29352,7 @@ diff -Nur linux-4.1.26.orig/net/core/skbuff.c linux-4.1.26/net/core/skbuff.c
return data;
}
-@@ -456,7 +459,12 @@
+@@ -456,7 +459,12 @@ EXPORT_SYMBOL(netdev_alloc_frag);
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
@@ -27997,10 +29366,11 @@ diff -Nur linux-4.1.26.orig/net/core/skbuff.c linux-4.1.26/net/core/skbuff.c
}
void *napi_alloc_frag(unsigned int fragsz)
-diff -Nur linux-4.1.26.orig/net/core/sock.c linux-4.1.26/net/core/sock.c
---- linux-4.1.26.orig/net/core/sock.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/core/sock.c 2016-06-19 15:30:58.839303211 +0200
-@@ -2369,12 +2369,11 @@
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 47fc8bb3b946..23a1423f78ca 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2369,12 +2369,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
@@ -28014,9 +29384,10 @@ diff -Nur linux-4.1.26.orig/net/core/sock.c linux-4.1.26/net/core/sock.c
}
EXPORT_SYMBOL(lock_sock_nested);
-diff -Nur linux-4.1.26.orig/net/ipv4/icmp.c linux-4.1.26/net/ipv4/icmp.c
---- linux-4.1.26.orig/net/ipv4/icmp.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/ipv4/icmp.c 2016-06-19 15:30:58.839303211 +0200
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index f5203fba6236..d7358c1ac63c 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
@@ -69,6 +69,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -28025,7 +29396,63 @@ diff -Nur linux-4.1.26.orig/net/ipv4/icmp.c linux-4.1.26/net/ipv4/icmp.c
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
-@@ -867,6 +868,30 @@
+@@ -77,6 +78,7 @@
+ #include <linux/string.h>
+ #include <linux/netfilter_ipv4.h>
+ #include <linux/slab.h>
++#include <linux/locallock.h>
+ #include <net/snmp.h>
+ #include <net/ip.h>
+ #include <net/route.h>
+@@ -203,6 +205,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
+ *
+ * On SMP we have one ICMP socket per-cpu.
+ */
++static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock);
++
+ static struct sock *icmp_sk(struct net *net)
+ {
+ return *this_cpu_ptr(net->ipv4.icmp_sk);
+@@ -214,12 +218,14 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
+
+ local_bh_disable();
+
++ local_lock(icmp_sk_lock);
+ sk = icmp_sk(net);
+
+ if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
+ /* This can happen if the output path signals a
+ * dst_link_failure() for an outgoing ICMP packet.
+ */
++ local_unlock(icmp_sk_lock);
+ local_bh_enable();
+ return NULL;
+ }
+@@ -229,6 +235,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
+ static inline void icmp_xmit_unlock(struct sock *sk)
+ {
+ spin_unlock_bh(&sk->sk_lock.slock);
++ local_unlock(icmp_sk_lock);
+ }
+
+ int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
+@@ -356,6 +363,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
+ struct sock *sk;
+ struct sk_buff *skb;
+
++ local_lock(icmp_sk_lock);
+ sk = icmp_sk(dev_net((*rt)->dst.dev));
+ if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
+ icmp_param->data_len+icmp_param->head_len,
+@@ -378,6 +386,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_push_pending_frames(sk, fl4);
+ }
++ local_unlock(icmp_sk_lock);
+ }
+
+ /*
+@@ -867,6 +876,30 @@ static bool icmp_redirect(struct sk_buff *skb)
}
/*
@@ -28056,7 +29483,7 @@ diff -Nur linux-4.1.26.orig/net/ipv4/icmp.c linux-4.1.26/net/ipv4/icmp.c
* Handle ICMP_ECHO ("ping") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
-@@ -893,6 +918,11 @@
+@@ -893,6 +926,11 @@ static bool icmp_echo(struct sk_buff *skb)
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
@@ -28068,10 +29495,11 @@ diff -Nur linux-4.1.26.orig/net/ipv4/icmp.c linux-4.1.26/net/ipv4/icmp.c
}
/* should there be an ICMP stat for ignored echos? */
return true;
-diff -Nur linux-4.1.26.orig/net/ipv4/sysctl_net_ipv4.c linux-4.1.26/net/ipv4/sysctl_net_ipv4.c
---- linux-4.1.26.orig/net/ipv4/sysctl_net_ipv4.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/ipv4/sysctl_net_ipv4.c 2016-06-19 15:30:58.839303211 +0200
-@@ -779,6 +779,13 @@
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index f0e829735968..aff60d4abd7c 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -779,6 +779,13 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
@@ -28085,10 +29513,58 @@ diff -Nur linux-4.1.26.orig/net/ipv4/sysctl_net_ipv4.c linux-4.1.26/net/ipv4/sys
.procname = "icmp_ignore_bogus_error_responses",
.data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
.maxlen = sizeof(int),
-diff -Nur linux-4.1.26.orig/net/mac80211/rx.c linux-4.1.26/net/mac80211/rx.c
---- linux-4.1.26.orig/net/mac80211/rx.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/mac80211/rx.c 2016-06-19 15:30:58.839303211 +0200
-@@ -3573,7 +3573,7 @@
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 13b92d595138..6bfa68fb5f21 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -62,6 +62,7 @@
+ #include <linux/init.h>
+ #include <linux/times.h>
+ #include <linux/slab.h>
++#include <linux/locallock.h>
+
+ #include <net/net_namespace.h>
+ #include <net/icmp.h>
+@@ -563,6 +564,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(tcp_v4_send_check);
+
++static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock);
+ /*
+ * This routine will send an RST to the other tcp.
+ *
+@@ -684,10 +686,13 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+ arg.bound_dev_if = sk->sk_bound_dev_if;
+
+ arg.tos = ip_hdr(skb)->tos;
++
++ local_lock(tcp_sk_lock);
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ skb, &TCP_SKB_CB(skb)->header.h4.opt,
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ &arg, arg.iov[0].iov_len);
++ local_unlock(tcp_sk_lock);
+
+ TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+ TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+@@ -769,10 +774,12 @@ static void tcp_v4_send_ack(struct net *net,
+ if (oif)
+ arg.bound_dev_if = oif;
+ arg.tos = tos;
++ local_lock(tcp_sk_lock);
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ skb, &TCP_SKB_CB(skb)->header.h4.opt,
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ &arg, arg.iov[0].iov_len);
++ local_unlock(tcp_sk_lock);
+
+ TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+ }
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 3073164a6fcf..d294ec1530b6 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3574,7 +3574,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -28097,9 +29573,10 @@ diff -Nur linux-4.1.26.orig/net/mac80211/rx.c linux-4.1.26/net/mac80211/rx.c
if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
goto drop;
-diff -Nur linux-4.1.26.orig/net/netfilter/core.c linux-4.1.26/net/netfilter/core.c
---- linux-4.1.26.orig/net/netfilter/core.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/netfilter/core.c 2016-06-19 15:30:58.839303211 +0200
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index 5d0c6fd59475..e81489741143 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
@@ -22,11 +22,17 @@
#include <linux/proc_fs.h>
#include <linux/mutex.h>
@@ -28118,9 +29595,10 @@ diff -Nur linux-4.1.26.orig/net/netfilter/core.c linux-4.1.26/net/netfilter/core
static DEFINE_MUTEX(afinfo_mutex);
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
-diff -Nur linux-4.1.26.orig/net/packet/af_packet.c linux-4.1.26/net/packet/af_packet.c
---- linux-4.1.26.orig/net/packet/af_packet.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/packet/af_packet.c 2016-06-19 15:30:58.839303211 +0200
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a3654d929814..7c1054d808fc 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
@@ -63,6 +63,7 @@
#include <linux/if_packet.h>
#include <linux/wireless.h>
@@ -28129,7 +29607,7 @@ diff -Nur linux-4.1.26.orig/net/packet/af_packet.c linux-4.1.26/net/packet/af_pa
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-@@ -698,7 +699,7 @@
+@@ -698,7 +699,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -28138,7 +29616,7 @@ diff -Nur linux-4.1.26.orig/net/packet/af_packet.c linux-4.1.26/net/packet/af_pa
}
}
-@@ -960,7 +961,7 @@
+@@ -960,7 +961,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -28147,9 +29625,10 @@ diff -Nur linux-4.1.26.orig/net/packet/af_packet.c linux-4.1.26/net/packet/af_pa
}
}
prb_close_block(pkc, pbd, po, status);
-diff -Nur linux-4.1.26.orig/net/rds/ib_rdma.c linux-4.1.26/net/rds/ib_rdma.c
---- linux-4.1.26.orig/net/rds/ib_rdma.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/rds/ib_rdma.c 2016-06-19 15:30:58.843303365 +0200
+diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
+index 657ba9f5d308..c8faaf36423a 100644
+--- a/net/rds/ib_rdma.c
++++ b/net/rds/ib_rdma.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/rculist.h>
@@ -28158,7 +29637,7 @@ diff -Nur linux-4.1.26.orig/net/rds/ib_rdma.c linux-4.1.26/net/rds/ib_rdma.c
#include "rds.h"
#include "ib.h"
-@@ -286,7 +287,7 @@
+@@ -286,7 +287,7 @@ static inline void wait_clean_list_grace(void)
for_each_online_cpu(cpu) {
flag = &per_cpu(clean_list_grace, cpu);
while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
@@ -28167,10 +29646,11 @@ diff -Nur linux-4.1.26.orig/net/rds/ib_rdma.c linux-4.1.26/net/rds/ib_rdma.c
}
}
-diff -Nur linux-4.1.26.orig/net/sched/sch_generic.c linux-4.1.26/net/sched/sch_generic.c
---- linux-4.1.26.orig/net/sched/sch_generic.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/sched/sch_generic.c 2016-06-19 15:30:58.843303365 +0200
-@@ -896,7 +896,7 @@
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 9821e6d641bb..c6bf6ea6d1d5 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -899,7 +899,7 @@ void dev_deactivate_many(struct list_head *head)
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
while (some_qdisc_is_busy(dev))
@@ -28179,10 +29659,11 @@ diff -Nur linux-4.1.26.orig/net/sched/sch_generic.c linux-4.1.26/net/sched/sch_g
}
void dev_deactivate(struct net_device *dev)
-diff -Nur linux-4.1.26.orig/net/sunrpc/svc_xprt.c linux-4.1.26/net/sunrpc/svc_xprt.c
---- linux-4.1.26.orig/net/sunrpc/svc_xprt.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/net/sunrpc/svc_xprt.c 2016-06-19 15:30:58.843303365 +0200
-@@ -341,7 +341,7 @@
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 163ac45c3639..ba2313cd4e36 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -341,7 +341,7 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
goto out;
}
@@ -28191,7 +29672,7 @@ diff -Nur linux-4.1.26.orig/net/sunrpc/svc_xprt.c linux-4.1.26/net/sunrpc/svc_xp
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
atomic_long_inc(&pool->sp_stats.packets);
-@@ -377,7 +377,7 @@
+@@ -377,7 +377,7 @@ redo_search:
atomic_long_inc(&pool->sp_stats.threads_woken);
wake_up_process(rqstp->rq_task);
@@ -28200,7 +29681,7 @@ diff -Nur linux-4.1.26.orig/net/sunrpc/svc_xprt.c linux-4.1.26/net/sunrpc/svc_xp
goto out;
}
rcu_read_unlock();
-@@ -398,7 +398,7 @@
+@@ -398,7 +398,7 @@ redo_search:
goto redo_search;
}
rqstp = NULL;
@@ -28209,10 +29690,11 @@ diff -Nur linux-4.1.26.orig/net/sunrpc/svc_xprt.c linux-4.1.26/net/sunrpc/svc_xp
out:
trace_svc_xprt_do_enqueue(xprt, rqstp);
}
-diff -Nur linux-4.1.26.orig/scripts/mkcompile_h linux-4.1.26/scripts/mkcompile_h
---- linux-4.1.26.orig/scripts/mkcompile_h 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/scripts/mkcompile_h 2016-06-19 15:30:58.843303365 +0200
-@@ -4,7 +4,8 @@
+diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
+index 6fdc97ef6023..523e0420d7f0 100755
+--- a/scripts/mkcompile_h
++++ b/scripts/mkcompile_h
+@@ -4,7 +4,8 @@ TARGET=$1
ARCH=$2
SMP=$3
PREEMPT=$4
@@ -28222,7 +29704,7 @@ diff -Nur linux-4.1.26.orig/scripts/mkcompile_h linux-4.1.26/scripts/mkcompile_h
vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
-@@ -57,6 +58,7 @@
+@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
CONFIG_FLAGS=""
if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
@@ -28230,10 +29712,11 @@ diff -Nur linux-4.1.26.orig/scripts/mkcompile_h linux-4.1.26/scripts/mkcompile_h
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
# Truncate to maximum length
-diff -Nur linux-4.1.26.orig/sound/core/pcm_native.c linux-4.1.26/sound/core/pcm_native.c
---- linux-4.1.26.orig/sound/core/pcm_native.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/sound/core/pcm_native.c 2016-06-19 15:30:58.843303365 +0200
-@@ -135,7 +135,7 @@
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index aa999e747c94..8195f789c680 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
if (!substream->pcm->nonatomic)
@@ -28242,7 +29725,7 @@ diff -Nur linux-4.1.26.orig/sound/core/pcm_native.c linux-4.1.26/sound/core/pcm_
snd_pcm_stream_lock(substream);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
-@@ -150,7 +150,7 @@
+@@ -150,7 +150,7 @@ void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
{
snd_pcm_stream_unlock(substream);
if (!substream->pcm->nonatomic)
@@ -28251,7 +29734,7 @@ diff -Nur linux-4.1.26.orig/sound/core/pcm_native.c linux-4.1.26/sound/core/pcm_
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
-@@ -158,7 +158,7 @@
+@@ -158,7 +158,7 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
{
unsigned long flags = 0;
if (!substream->pcm->nonatomic)
@@ -28260,7 +29743,7 @@ diff -Nur linux-4.1.26.orig/sound/core/pcm_native.c linux-4.1.26/sound/core/pcm_
snd_pcm_stream_lock(substream);
return flags;
}
-@@ -176,7 +176,7 @@
+@@ -176,7 +176,7 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
{
snd_pcm_stream_unlock(substream);
if (!substream->pcm->nonatomic)
@@ -28269,10 +29752,11 @@ diff -Nur linux-4.1.26.orig/sound/core/pcm_native.c linux-4.1.26/sound/core/pcm_
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
-diff -Nur linux-4.1.26.orig/sound/soc/intel/atom/sst/sst.c linux-4.1.26/sound/soc/intel/atom/sst/sst.c
---- linux-4.1.26.orig/sound/soc/intel/atom/sst/sst.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/sound/soc/intel/atom/sst/sst.c 2016-06-19 15:30:58.843303365 +0200
-@@ -368,8 +368,8 @@
+diff --git a/sound/soc/intel/atom/sst/sst.c b/sound/soc/intel/atom/sst/sst.c
+index 96c2e420cce6..a4b458e77089 100644
+--- a/sound/soc/intel/atom/sst/sst.c
++++ b/sound/soc/intel/atom/sst/sst.c
+@@ -368,8 +368,8 @@ static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
* initialize by FW or driver when firmware is loaded
*/
spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
@@ -28283,10 +29767,11 @@ diff -Nur linux-4.1.26.orig/sound/soc/intel/atom/sst/sst.c linux-4.1.26/sound/so
spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
}
-diff -Nur linux-4.1.26.orig/virt/kvm/async_pf.c linux-4.1.26/virt/kvm/async_pf.c
---- linux-4.1.26.orig/virt/kvm/async_pf.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/virt/kvm/async_pf.c 2016-06-19 15:30:58.843303365 +0200
-@@ -94,8 +94,8 @@
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index f84f5856520a..9b0cd1b03222 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -94,8 +94,8 @@ static void async_pf_execute(struct work_struct *work)
trace_kvm_async_pf_completed(addr, gva);
@@ -28297,10 +29782,11 @@ diff -Nur linux-4.1.26.orig/virt/kvm/async_pf.c linux-4.1.26/virt/kvm/async_pf.c
mmput(mm);
kvm_put_kvm(vcpu->kvm);
-diff -Nur linux-4.1.26.orig/virt/kvm/kvm_main.c linux-4.1.26/virt/kvm/kvm_main.c
---- linux-4.1.26.orig/virt/kvm/kvm_main.c 2016-06-07 01:13:11.000000000 +0200
-+++ linux-4.1.26/virt/kvm/kvm_main.c 2016-06-19 15:30:58.843303365 +0200
-@@ -218,7 +218,7 @@
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index f9746f29f870..3424e7fe6678 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -220,7 +220,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
vcpu->pid = NULL;
@@ -28309,7 +29795,7 @@ diff -Nur linux-4.1.26.orig/virt/kvm/kvm_main.c linux-4.1.26/virt/kvm/kvm_main.c
kvm_async_pf_vcpu_init(vcpu);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-@@ -1780,7 +1780,7 @@
+@@ -1782,7 +1782,7 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
ktime_t start, cur;
@@ -28318,7 +29804,7 @@ diff -Nur linux-4.1.26.orig/virt/kvm/kvm_main.c linux-4.1.26/virt/kvm/kvm_main.c
bool waited = false;
start = cur = ktime_get();
-@@ -1801,7 +1801,7 @@
+@@ -1803,7 +1803,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
}
for (;;) {
@@ -28327,7 +29813,7 @@ diff -Nur linux-4.1.26.orig/virt/kvm/kvm_main.c linux-4.1.26/virt/kvm/kvm_main.c
if (kvm_vcpu_check_block(vcpu) < 0)
break;
-@@ -1810,7 +1810,7 @@
+@@ -1812,7 +1812,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
schedule();
}
@@ -28336,7 +29822,7 @@ diff -Nur linux-4.1.26.orig/virt/kvm/kvm_main.c linux-4.1.26/virt/kvm/kvm_main.c
cur = ktime_get();
out:
-@@ -1826,11 +1826,11 @@
+@@ -1828,11 +1828,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int me;
int cpu = vcpu->cpu;
@@ -28351,7 +29837,7 @@ diff -Nur linux-4.1.26.orig/virt/kvm/kvm_main.c linux-4.1.26/virt/kvm/kvm_main.c
++vcpu->stat.halt_wakeup;
}
-@@ -1931,7 +1931,7 @@
+@@ -1933,7 +1933,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
continue;
if (vcpu == me)
continue;
diff --git a/target/linux/patches/4.8.5/crisv32.patch b/target/linux/patches/4.8.6/crisv32.patch
index cb9b0d028..cb9b0d028 100644
--- a/target/linux/patches/4.8.5/crisv32.patch
+++ b/target/linux/patches/4.8.6/crisv32.patch
diff --git a/target/linux/patches/4.8.5/crisv32_ethernet_driver.patch b/target/linux/patches/4.8.6/crisv32_ethernet_driver.patch
index 0cef202fc..0cef202fc 100644
--- a/target/linux/patches/4.8.5/crisv32_ethernet_driver.patch
+++ b/target/linux/patches/4.8.6/crisv32_ethernet_driver.patch
diff --git a/target/linux/patches/4.8.5/initramfs-nosizelimit.patch b/target/linux/patches/4.8.6/initramfs-nosizelimit.patch
index 40d2f6bd8..40d2f6bd8 100644
--- a/target/linux/patches/4.8.5/initramfs-nosizelimit.patch
+++ b/target/linux/patches/4.8.6/initramfs-nosizelimit.patch
diff --git a/target/linux/patches/4.8.5/ld-or1k.patch b/target/linux/patches/4.8.6/ld-or1k.patch
index 264f9166f..264f9166f 100644
--- a/target/linux/patches/4.8.5/ld-or1k.patch
+++ b/target/linux/patches/4.8.6/ld-or1k.patch
diff --git a/target/linux/patches/4.8.5/macsonic.patch b/target/linux/patches/4.8.6/macsonic.patch
index 75a6fcad2..75a6fcad2 100644
--- a/target/linux/patches/4.8.5/macsonic.patch
+++ b/target/linux/patches/4.8.6/macsonic.patch
diff --git a/target/linux/patches/4.8.6/patch-realtime b/target/linux/patches/4.8.6/patch-realtime
new file mode 100644
index 000000000..6a856dad7
--- /dev/null
+++ b/target/linux/patches/4.8.6/patch-realtime
@@ -0,0 +1,25908 @@
+diff --git a/Documentation/hwlat_detector.txt b/Documentation/hwlat_detector.txt
+new file mode 100644
+index 000000000000..cb61516483d3
+--- /dev/null
++++ b/Documentation/hwlat_detector.txt
+@@ -0,0 +1,64 @@
++Introduction:
++-------------
++
++The module hwlat_detector is a special purpose kernel module that is used to
++detect large system latencies induced by the behavior of certain underlying
++hardware or firmware, independent of Linux itself. The code was developed
++originally to detect SMIs (System Management Interrupts) on x86 systems,
++however there is nothing x86 specific about this patchset. It was
++originally written for use by the "RT" patch since the Real Time
++kernel is highly latency sensitive.
++
++SMIs are usually not serviced by the Linux kernel, which typically does not
++even know that they are occurring. SMIs are instead set up by BIOS code
++and are serviced by BIOS code, usually for "critical" events such as
++management of thermal sensors and fans. Sometimes though, SMIs are used for
++other tasks and those tasks can spend an inordinate amount of time in the
++handler (sometimes measured in milliseconds). Obviously this is a problem if
++you are trying to keep event service latencies down in the microsecond range.
++
++The hardware latency detector works by hogging all of the cpus for configurable
++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
++for some period, then looking for gaps in the TSC data. Any gap indicates a
++time when the polling was interrupted; since the machine is stopped and
++interrupts are turned off, the only thing that could do that would be an SMI.
++
++Note that the SMI detector should *NEVER* be used in a production environment.
++It is intended to be run manually to determine if the hardware platform has a
++problem with long system firmware service routines.
++
++Usage:
++------
++
++Loading the module hwlat_detector with the parameter "enabled=1" (or by
++toggling the "enable" entry in "hwlat_detector" debugfs on) is the only
++step required to start the hwlat_detector. It is possible to redefine the
++threshold in microseconds (us) above which latency spikes will be taken
++into account (parameter "threshold=").
++
++Example:
++
++ # modprobe hwlat_detector enabled=1 threshold=100
++
++After the module is loaded, it creates a directory named "hwlat_detector" under
++the debugfs mountpoint ("/debug/hwlat_detector" in this text). It is necessary
++to have debugfs mounted, which might be on /sys/debug on your system.
++
++The /debug/hwlat_detector interface contains the following files:
++
++count - number of latency spikes observed since last reset
++enable - a global enable/disable toggle (0/1), resets count
++max - maximum hardware latency actually observed (usecs)
++sample - a pipe from which to read current raw sample data
++ in the format <timestamp> <latency observed usecs>
++ (can be opened O_NONBLOCK for a single sample)
++threshold - minimum latency value to be considered (usecs)
++width - time period to sample with CPUs held (usecs)
++ must be less than the total window size (enforced)
++window - total period of sampling, width being inside (usecs)
++
++By default we will set width to 500,000 and window to 1,000,000, meaning that
++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
++observe any latencies that exceed the threshold (initially 100 usecs),
++then we write to a global sample ring buffer of 8K samples, which is
++consumed by reading from the "sample" (pipe) debugfs file interface.
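A minimal worked session with the interface described above; this is a sketch that assumes debugfs is mounted at /sys/kernel/debug (adjust the path if your system uses /debug or /sys/debug instead):

 # modprobe hwlat_detector enabled=1 threshold=100
 # cd /sys/kernel/debug/hwlat_detector
 # cat max          # worst hardware latency observed so far (usecs)
 # cat sample       # blocks until the next raw sample is available,
                    # printed as "<timestamp> <latency observed usecs>"
 # echo 0 > enable  # stop sampling; toggling "enable" also resets "count"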
+diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
+index 3a3b30ac2a75..9e0745cafbd8 100644
+--- a/Documentation/sysrq.txt
++++ b/Documentation/sysrq.txt
+@@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
+ On other - If you know of the key combos for other architectures, please
+ let me know so I can add them to this section.
+
+-On all - write a character to /proc/sysrq-trigger. e.g.:
+-
++On all - write a character to /proc/sysrq-trigger, e.g.:
+ echo t > /proc/sysrq-trigger
+
++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
++ Send an ICMP echo request with this pattern plus the particular
++ SysRq command key. Example:
++ # ping -c1 -s57 -p0102030468
++ will trigger the SysRq-H (help) command.
++
++
+ * What are the 'command' keys?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'b' - Will immediately reboot the system without syncing or unmounting
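The trailing byte of the ping pattern is simply the ASCII code of the desired command key, so the scheme above generalizes to any SysRq command. A short sketch, reusing the same (arbitrary) cookie; <target> stands for the machine running the patched kernel:

 # on the target machine:
 echo 0x01020304 > /proc/sys/net/ipv4/icmp_echo_sysrq
 # from another host: 0x74 is ASCII 't', so this triggers
 # SysRq-T (dump current tasks) on the target:
 ping -c1 -s57 -p0102030474 <target>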
+diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt
+new file mode 100644
+index 000000000000..6f2aeabf7faa
+--- /dev/null
++++ b/Documentation/trace/histograms.txt
+@@ -0,0 +1,186 @@
++ Using the Linux Kernel Latency Histograms
++
++
++This document gives a short explanation how to enable, configure and use
++latency histograms. Latency histograms are primarily relevant in the
++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
++and are used in the quality management of the Linux real-time
++capabilities.
++
++
++* Purpose of latency histograms
++
++A latency histogram continuously accumulates the frequencies of latency
++data. There are two types of histograms:
++- potential sources of latencies
++- effective latencies
++
++
++* Potential sources of latencies
++
++Potential sources of latencies are code segments where interrupts,
++preemption or both are disabled (aka critical sections). To create
++histograms of potential sources of latency, the kernel stores the time
++stamp at the start of a critical section, determines the time elapsed
++when the end of the section is reached, and increments the frequency
++counter of that latency value - irrespective of whether any concurrently
++running process is affected by latency or not.
++- Configuration items (in the Kernel hacking/Tracers submenu)
++ CONFIG_INTERRUPT_OFF_LATENCY
++ CONFIG_PREEMPT_OFF_LATENCY
++
++
++* Effective latencies
++
++Effective latencies are those actually occurring during wakeup of a process. To
++determine effective latencies, the kernel stores the time stamp when a
++process is scheduled to be woken up, and determines the duration of the
++wakeup time shortly before control is passed over to this process. Note
++that the apparent latency in user space may be somewhat longer, since the
++process may be interrupted after control is passed over to it but before
++the execution in user space takes place. Simply measuring the interval
++between enqueuing and wakeup may also not be appropriate in cases when a
++process is scheduled as a result of a timer expiration. The timer may have
++missed its deadline, e.g. due to disabled interrupts, but this latency
++would not be registered. Therefore, the offsets of missed timers are
++recorded in a separate histogram. If both wakeup latency and missed timer
++offsets are configured and enabled, a third histogram may be enabled that
++records the overall latency as a sum of the timer latency, if any, and the
++wakeup latency. This histogram is called "timerandwakeup".
++- Configuration items (in the Kernel hacking/Tracers submenu)
++ CONFIG_WAKEUP_LATENCY
++ CONFIG_MISSED_TIMER_OFFSETS
++
++
++* Usage
++
++The interface to the administration of the latency histograms is located
++in the debugfs file system. To mount it, either enter
++
++mount -t sysfs nodev /sys
++mount -t debugfs nodev /sys/kernel/debug
++
++from the shell command line, or add
++
++nodev /sys sysfs defaults 0 0
++nodev /sys/kernel/debug debugfs defaults 0 0
++
++to the file /etc/fstab. All latency histogram related files are then
++available in the directory /sys/kernel/debug/tracing/latency_hist. A
++particular histogram type is enabled by writing non-zero to the related
++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
++Select "preemptirqsoff" for the histograms of potential sources of
++latencies and "wakeup" for histograms of effective latencies etc. The
++histogram data - one per CPU - are available in the files
++
++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
++
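For example, to switch on the wakeup histograms named above and read the data collected for CPU0 (a sketch using exactly the paths documented here):

 echo 1 > /sys/kernel/debug/tracing/latency_hist/enable/wakeup
 cat /sys/kernel/debug/tracing/latency_hist/wakeup/CPU0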
++The histograms are reset by writing non-zero to the file "reset" in a
++particular latency directory. To reset all latency data, use
++
++#!/bin/sh
++
++TRACINGDIR=/sys/kernel/debug/tracing
++HISTDIR=$TRACINGDIR/latency_hist
++
++if test -d $HISTDIR
++then
++ cd $HISTDIR
++ for i in `find . | grep /reset$`
++ do
++ echo 1 >$i
++ done
++fi
++
++
++* Data format
++
++Latency data are stored with a resolution of one microsecond. The
++maximum latency is 10,240 microseconds. The data are only valid if the
++overflow register is empty. Every output line contains the latency in
++microseconds in the first column and the number of samples in the second
++column. To display only lines with a positive latency count, use, for
++example,
++
++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
++
++#Minimum latency: 0 microseconds.
++#Average latency: 0 microseconds.
++#Maximum latency: 25 microseconds.
++#Total samples: 3104770694
++#There are 0 samples greater or equal than 10240 microseconds
++#usecs samples
++ 0 2984486876
++ 1 49843506
++ 2 58219047
++ 3 5348126
++ 4 2187960
++ 5 3388262
++ 6 959289
++ 7 208294
++ 8 40420
++ 9 4485
++ 10 14918
++ 11 18340
++ 12 25052
++ 13 19455
++ 14 5602
++ 15 969
++ 16 47
++ 17 18
++ 18 14
++ 19 1
++ 20 3
++ 21 2
++ 22 5
++ 23 2
++ 25 1
++
++
++* Wakeup latency of a selected process
++
++To only collect wakeup latency data of a particular process, write the
++PID of the requested process to
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/pid
++
++PIDs are not considered if this variable is set to 0.
++
++
++* Details of the process with the highest wakeup latency so far
++
++Selected data of the process that suffered from the highest wakeup
++latency that occurred in a particular CPU are available in the file
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
++
++In addition, other relevant system data at the time when the
++latency occurred are given.
++
++The format of the data is (all in one line):
++<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
++<- <PID> <Priority> <Command> <Timestamp>
++
++The value of <Timeroffset> is only relevant in the combined timer
++and wakeup latency recording. In the wakeup recording, it is
++always 0; in the missed_timer_offsets recording, it is the same
++as <Latency>.
++
++When retrospectively searching for the origin of a latency while
++tracing was not enabled, it may be helpful to know the name and
++some basic data of the task that (finally) was switching to the
++late real-time task. In addition to the victim's data, the data
++of the possible culprit are therefore also displayed after the
++"<-" symbol.
++
++Finally, the timestamp of the time when the latency occurred
++in <seconds>.<microseconds> after the most recent system boot
++is provided.
++
++These data are also reset when the wakeup histogram is reset.
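Putting the per-process controls above together, the following sketch watches the wakeup latency of a single task; "my_rt_app" is a placeholder for whatever real-time task is of interest:

 PID=$(pidof my_rt_app)          # hypothetical task name
 cd /sys/kernel/debug/tracing/latency_hist
 echo $PID > wakeup/pid          # restrict data collection to this PID
 echo 1 > enable/wakeup          # switch the wakeup histograms on
 cat wakeup/max_latency-CPU0     # worst wakeup latency recorded on CPU0
 echo 0 > wakeup/pid             # 0 means all PIDs are considered again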
+diff --git a/Makefile b/Makefile
+index b249529204cd..5d699d055995 100644
+--- a/Makefile
++++ b/Makefile
+@@ -398,12 +398,12 @@ KBUILD_CPPFLAGS := -D__KERNEL__
+ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ -fno-strict-aliasing -fno-common \
+ -Werror-implicit-function-declaration \
+- -Wno-format-security \
++ -Wno-format-security -fno-PIE \
+ -std=gnu89
+
+ KBUILD_AFLAGS_KERNEL :=
+ KBUILD_CFLAGS_KERNEL :=
+-KBUILD_AFLAGS := -D__ASSEMBLY__
++KBUILD_AFLAGS := -D__ASSEMBLY__ -fno-PIE
+ KBUILD_AFLAGS_MODULE := -DMODULE
+ KBUILD_CFLAGS_MODULE := -DMODULE
+ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+diff --git a/arch/Kconfig b/arch/Kconfig
+index fd6e9712af81..085134ee13e9 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -9,6 +9,7 @@ config OPROFILE
+ tristate "OProfile system profiling"
+ depends on PROFILING
+ depends on HAVE_OPROFILE
++ depends on !PREEMPT_RT_FULL
+ select RING_BUFFER
+ select RING_BUFFER_ALLOW_SWAP
+ help
+@@ -52,6 +53,7 @@ config KPROBES
+ config JUMP_LABEL
+ bool "Optimize very unlikely/likely branches"
+ depends on HAVE_ARCH_JUMP_LABEL
++ depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
+ help
+ This option enables a transparent branch optimization that
+ makes certain almost-always-true or almost-always-false branch
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index a9c4e48bb7ec..6eefe4f32302 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -36,7 +36,7 @@ config ARM
+ select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+ select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+ select HAVE_ARCH_HARDENED_USERCOPY
+- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
+ select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+@@ -75,6 +75,7 @@ config ARM
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
+index 12ebfcc1d539..c962084605bc 100644
+--- a/arch/arm/include/asm/switch_to.h
++++ b/arch/arm/include/asm/switch_to.h
+@@ -3,6 +3,13 @@
+
+ #include <linux/thread_info.h>
+
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+ /*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+@@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
+ #define switch_to(prev,next,last) \
+ do { \
+ __complete_pending_tlbi(); \
++ switch_kmaps(prev, next); \
+ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
+ } while (0)
+
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 776757d1604a..1f36a4eccc72 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -49,6 +49,7 @@ struct cpu_context_save {
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ __u32 cpu; /* cpu */
+@@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define TIF_SYSCALL_TRACE 4 /* syscall trace active */
+ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
++#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
++#define TIF_NEED_RESCHED_LAZY 7
+
+ #define TIF_NOHZ 12 /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT 17
+@@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+@@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ * Change these and you break ASM code in entry-common.S
+ */
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
++ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
++ _TIF_NEED_RESCHED_LAZY)
+
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_ARM_THREAD_INFO_H */
+diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
+index 608008229c7d..3866da3f7bb7 100644
+--- a/arch/arm/kernel/asm-offsets.c
++++ b/arch/arm/kernel/asm-offsets.c
+@@ -65,6 +65,7 @@ int main(void)
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 9f157e7c51e7..468e224d76aa 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -220,11 +220,18 @@ ENDPROC(__dabt_svc)
+
+ #ifdef CONFIG_PREEMPT
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+- ldr r0, [tsk, #TI_FLAGS] @ get flags
+ teq r8, #0 @ if preempt count != 0
++ bne 1f @ return from exception
++ ldr r0, [tsk, #TI_FLAGS] @ get flags
++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
++ blne svc_preempt @ preempt!
++
++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r8, #0 @ if preempt lazy count != 0
+ movne r0, #0 @ force flags to 0
+- tst r0, #_TIF_NEED_RESCHED
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ blne svc_preempt
++1:
+ #endif
+
+ svc_exit r5, irq = 1 @ return from exception
+@@ -239,8 +246,14 @@ ENDPROC(__irq_svc)
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
++ bne 1b
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ reteq r8 @ go again
+- b 1b
++ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r0, #0 @ if preempt lazy count != 0
++ beq 1b
++ ret r8 @ go again
++
+ #endif
+
+ __und_fault:
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 10c3283d6c19..8872937862cc 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -36,7 +36,9 @@
+ UNWIND(.cantunwind )
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++ bne fast_work_pending
++ tst r1, #_TIF_SECCOMP
+ bne fast_work_pending
+
+ /* perform architecture specific actions before user return */
+@@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall)
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++ bne do_slower_path
++ tst r1, #_TIF_SECCOMP
+ beq no_work_pending
++do_slower_path:
+ UNWIND(.fnend )
+ ENDPROC(ret_fast_syscall)
+
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 612eb530f33f..cd3006dc1fd3 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -323,6 +323,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ }
+
+ #ifdef CONFIG_MMU
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++ struct page *page;
++ unsigned long addr = 0xffff0000;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ page = pmd_page(*(pmd));
++
++ pgtable_page_ctor(page);
++
++ return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
++
+ #ifdef CONFIG_KUSER_HELPERS
+ /*
+ * The vectors page is always readable from user space for the
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 7b8f2141427b..96541e00b74a 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ */
+ trace_hardirqs_off();
+ do {
+- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ if (likely(thread_flags & (_TIF_NEED_RESCHED |
++ _TIF_NEED_RESCHED_LAZY))) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 861521606c6d..e5ca865d321b 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -234,8 +234,6 @@ int __cpu_disable(void)
+ flush_cache_louis();
+ local_flush_tlb_all();
+
+- clear_tasks_mm_cpumask(cpu);
+-
+ return 0;
+ }
+
+@@ -251,6 +249,9 @@ void __cpu_die(unsigned int cpu)
+ pr_err("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
++
++ clear_tasks_mm_cpumask(cpu);
++
+ pr_notice("CPU%u: shutdown\n", cpu);
+
+ /*
+diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
+index 0bee233fef9a..314cfb232a63 100644
+--- a/arch/arm/kernel/unwind.c
++++ b/arch/arm/kernel/unwind.c
+@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
+ static const struct unwind_idx *__origin_unwind_idx;
+ extern const struct unwind_idx __stop_unwind_idx[];
+
+-static DEFINE_SPINLOCK(unwind_lock);
++static DEFINE_RAW_SPINLOCK(unwind_lock);
+ static LIST_HEAD(unwind_tables);
+
+ /* Convert a prel31 symbol to an absolute address */
+@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
+ /* module unwind tables */
+ struct unwind_table *table;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_for_each_entry(table, &unwind_tables, list) {
+ if (addr >= table->begin_addr &&
+ addr < table->end_addr) {
+@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ }
+
+ pr_debug("%s: idx = %p\n", __func__, idx);
+@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
+ tab->begin_addr = text_addr;
+ tab->end_addr = text_addr + text_size;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_add_tail(&tab->list, &unwind_tables);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ return tab;
+ }
+@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
+ if (!tab)
+ return;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_del(&tab->list);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ kfree(tab);
+ }
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index c94b90d43772..244dde72018a 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -584,7 +584,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ * involves poking the GIC, which must be done in a
+ * non-preemptible context.
+ */
+- preempt_disable();
++ migrate_disable();
+ kvm_pmu_flush_hwstate(vcpu);
+ kvm_timer_flush_hwstate(vcpu);
+ kvm_vgic_flush_hwstate(vcpu);
+@@ -605,7 +605,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ kvm_pmu_sync_hwstate(vcpu);
+ kvm_timer_sync_hwstate(vcpu);
+ kvm_vgic_sync_hwstate(vcpu);
+- preempt_enable();
++ migrate_enable();
+ continue;
+ }
+
+@@ -661,7 +661,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+
+ kvm_vgic_sync_hwstate(vcpu);
+
+- preempt_enable();
++ migrate_enable();
+
+ ret = handle_exit(vcpu, run, ret);
+ }
+diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
+index 98ffe1e62ad5..df9769ddece5 100644
+--- a/arch/arm/mach-exynos/platsmp.c
++++ b/arch/arm/mach-exynos/platsmp.c
+@@ -229,7 +229,7 @@ static void __iomem *scu_base_addr(void)
+ return (void __iomem *)(S5P_VA_SCU);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void exynos_secondary_init(unsigned int cpu)
+ {
+@@ -242,8 +242,8 @@ static void exynos_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
+@@ -307,7 +307,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -334,7 +334,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+
+ if (timeout == 0) {
+ printk(KERN_ERR "cpu1 power enable failed");
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return -ETIMEDOUT;
+ }
+ }
+@@ -380,7 +380,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * calibrations, then wait for it to finish
+ */
+ fail:
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? ret : 0;
+ }
+diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
+index 4b653a8cb75c..b03d5a922cb1 100644
+--- a/arch/arm/mach-hisi/platmcpm.c
++++ b/arch/arm/mach-hisi/platmcpm.c
+@@ -61,7 +61,7 @@
+
+ static void __iomem *sysctrl, *fabric;
+ static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ static u32 fabric_phys_addr;
+ /*
+ * [0]: bootwrapper physical address
+@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
+ if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
+ return -EINVAL;
+
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+
+ if (hip04_cpu_table[cluster][cpu])
+ goto out;
+@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
+
+ out:
+ hip04_cpu_table[cluster][cpu]++;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+
+ return 0;
+ }
+@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu)
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+ hip04_cpu_table[cluster][cpu]--;
+ if (hip04_cpu_table[cluster][cpu] == 1) {
+ /* A power_up request went ahead of us. */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return;
+ } else if (hip04_cpu_table[cluster][cpu] > 1) {
+ pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
+@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu)
+ }
+
+ last_man = hip04_cluster_is_down(cluster);
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ if (last_man) {
+ /* Since it's Cortex A15, disable L2 prefetching. */
+ asm volatile(
+@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+ cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
+
+ count = TIMEOUT_MSEC / POLL_MSEC;
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ for (tries = 0; tries < count; tries++) {
+ if (hip04_cpu_table[cluster][cpu])
+ goto err;
+@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+ data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
+ if (data & CORE_WFI_STATUS(cpu))
+ break;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ /* Wait for clean L2 when the whole cluster is down. */
+ msleep(POLL_MSEC);
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ }
+ if (tries >= count)
+ goto err;
+@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+ goto err;
+ if (hip04_cluster_is_down(cluster))
+ hip04_set_snoop_filter(cluster, 0);
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return 1;
+ err:
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return 0;
+ }
+ #endif
+diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
+index b4de3da6dffa..b52893319d75 100644
+--- a/arch/arm/mach-omap2/omap-smp.c
++++ b/arch/arm/mach-omap2/omap-smp.c
+@@ -64,7 +64,7 @@ static const struct omap_smp_config omap5_cfg __initconst = {
+ .startup_addr = omap5_secondary_startup,
+ };
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __iomem *omap4_get_scu_base(void)
+ {
+@@ -131,8 +131,8 @@ static void omap4_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -146,7 +146,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -223,7 +223,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * Now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return 0;
+ }
+diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
+index 0875b99add18..18b6d98d2581 100644
+--- a/arch/arm/mach-prima2/platsmp.c
++++ b/arch/arm/mach-prima2/platsmp.c
+@@ -22,7 +22,7 @@
+
+ static void __iomem *clk_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void sirfsoc_secondary_init(unsigned int cpu)
+ {
+@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static const struct of_device_id clk_ids[] = {
+@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ /* make sure write buffer is drained */
+ mb();
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
+index 5494c9e0c909..e8ce157d3548 100644
+--- a/arch/arm/mach-qcom/platsmp.c
++++ b/arch/arm/mach-qcom/platsmp.c
+@@ -46,7 +46,7 @@
+
+ extern void secondary_startup_arm(void);
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ static void qcom_cpu_die(unsigned int cpu)
+@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int scss_release_secondary(unsigned int cpu)
+@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return ret;
+ }
+diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
+index 8d1e2d551786..7fa56cc78118 100644
+--- a/arch/arm/mach-spear/platsmp.c
++++ b/arch/arm/mach-spear/platsmp.c
+@@ -32,7 +32,7 @@ static void write_pen_release(int val)
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+
+@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
+index ea5a2277ee46..b988e081ac79 100644
+--- a/arch/arm/mach-sti/platsmp.c
++++ b/arch/arm/mach-sti/platsmp.c
+@@ -35,7 +35,7 @@ static void write_pen_release(int val)
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void sti_secondary_init(unsigned int cpu)
+ {
+@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 3a2e678b8d30..3ed1e9ba6a01 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, fsr, regs);
+
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ if (user_mode(regs))
+ goto bad_area;
+
+@@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+ static int
+ do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ do_bad_area(addr, fsr, regs);
+ return 0;
+ }
+diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
+index d02f8187b1cc..542692dbd40a 100644
+--- a/arch/arm/mm/highmem.c
++++ b/arch/arm/mm/highmem.c
+@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
+ return *ptep;
+ }
+
++static unsigned int fixmap_idx(int type)
++{
++ return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++}
++
+ void *kmap(struct page *page)
+ {
+ might_sleep();
+@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
+
+ void *kmap_atomic(struct page *page)
+ {
++ pte_t pte = mk_pte(page, kmap_prot);
+ unsigned int idx;
+ unsigned long vaddr;
+ void *kmap;
+ int type;
+
+- preempt_disable();
++ preempt_disable_nort();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
+
+ type = kmap_atomic_idx_push();
+
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++ idx = fixmap_idx(type);
+ vaddr = __fix_to_virt(idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ /*
+@@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
+ * in place, so the contained TLB flush ensures the TLB is updated
+ * with the new mapping.
+ */
+- set_fixmap_pte(idx, mk_pte(page, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_fixmap_pte(idx, pte);
+
+ return (void *)vaddr;
+ }
+@@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr)
+
+ if (kvaddr >= (void *)FIXADDR_START) {
+ type = kmap_atomic_idx();
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++ idx = fixmap_idx(type);
+
+ if (cache_is_vivt())
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(vaddr != __fix_to_virt(idx));
+- set_fixmap_pte(idx, __pte(0));
+ #else
+ (void) idx; /* to kill a warning */
+ #endif
++ set_fixmap_pte(idx, __pte(0));
+ kmap_atomic_idx_pop();
+ } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+ /* this address was obtained through kmap_high_get() */
+ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+ }
+ pagefault_enable();
+- preempt_enable();
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(__kunmap_atomic);
+
+ void *kmap_atomic_pfn(unsigned long pfn)
+ {
++ pte_t pte = pfn_pte(pfn, kmap_prot);
+ unsigned long vaddr;
+ int idx, type;
+ struct page *page = pfn_to_page(pfn);
+
+- preempt_disable();
++ preempt_disable_nort();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ type = kmap_atomic_idx_push();
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++ idx = fixmap_idx(type);
+ vaddr = __fix_to_virt(idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
+ #endif
+- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_fixmap_pte(idx, pte);
+
+ return (void *)vaddr;
+ }
++#if defined CONFIG_PREEMPT_RT_FULL
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ int i;
++
++ /*
++ * Clear @prev's kmap_atomic mappings
++ */
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = fixmap_idx(i);
++
++ set_fixmap_pte(idx, __pte(0));
++ }
++ /*
++ * Restore @next_p's kmap_atomic mappings
++ */
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = fixmap_idx(i);
++
++ if (!pte_none(next_p->kmap_pte[i]))
++ set_fixmap_pte(idx, next_p->kmap_pte[i]);
++ }
++}
++#endif
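
On mainline, kmap_atomic() slots are strictly per-CPU and only valid while preemption is off; PREEMPT_RT keeps such sections preemptible, so the hunk above shadows each task's live fixmap PTEs in task_struct (the kmap_idx/kmap_pte[] fields added elsewhere in this patch) and replays them in switch_kmaps() when the task is switched back in. A minimal, self-contained C sketch of that save/restore idea; every name here is an illustrative stand-in, not kernel API:

/* Illustrative sketch: per-task shadow of per-CPU mapping slots. */
#include <stddef.h>

#define KM_SLOTS 16

typedef unsigned long pte_sketch_t;             /* stand-in for pte_t */

struct task_sketch {
        int          kmap_idx;                  /* slots in use */
        pte_sketch_t kmap_pte[KM_SLOTS];        /* shadow copies */
};

static pte_sketch_t cpu_fixmap[KM_SLOTS];       /* stand-in for the per-CPU fixmap */

/* On a context switch, tear down @prev's slots and replay @next's,
 * which is what switch_kmaps() above does with real fixmap PTEs. */
static void switch_kmaps_sketch(struct task_sketch *prev,
                                struct task_sketch *next)
{
        int i;

        for (i = 0; i < prev->kmap_idx; i++)
                cpu_fixmap[i] = 0;              /* clear prev's mappings */

        for (i = 0; i < next->kmap_idx; i++)
                if (next->kmap_pte[i])
                        cpu_fixmap[i] = next->kmap_pte[i];
}
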
+diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
+index c2366510187a..6b60f582b738 100644
+--- a/arch/arm/plat-versatile/platsmp.c
++++ b/arch/arm/plat-versatile/platsmp.c
+@@ -32,7 +32,7 @@ static void write_pen_release(int val)
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void versatile_secondary_init(unsigned int cpu)
+ {
+@@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu)
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+@@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
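
The spear13xx, sti and versatile hunks above all apply the same RT rule: a lock taken where sleeping is impossible (here the secondary-CPU boot path, before the scheduler is usable on that CPU) must remain a true busy-waiting lock. On PREEMPT_RT a plain spinlock_t becomes a sleeping rt_mutex, so such locks are converted to raw_spinlock_t, which keeps the mainline spinning behaviour. A kernel-style sketch of the convention; the API is real, the lock and function names are illustrative:

#include <linux/spinlock.h>

/* raw_spinlock_t always spins, even on PREEMPT_RT */
static DEFINE_RAW_SPINLOCK(demo_boot_lock);

static void demo_sync_with_secondary(void)
{
        raw_spin_lock(&demo_boot_lock);
        /* ... hardware poking that must never sleep ... */
        raw_spin_unlock(&demo_boot_lock);
}
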
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index bc3f00f586f1..0f3df6d5154a 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -90,6 +90,7 @@ config ARM64
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RCU_TABLE_FREE
+ select HAVE_SYSCALL_TRACEPOINTS
+@@ -689,7 +690,7 @@ config XEN_DOM0
+
+ config XEN
+ bool "Xen guest support on ARM64"
+- depends on ARM64 && OF
++ depends on ARM64 && OF && !PREEMPT_RT_FULL
+ select SWIOTLB_XEN
+ select PARAVIRT
+ help
+diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
+index abd64bd1f6d9..9170788ffa37 100644
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -49,6 +49,7 @@ struct thread_info {
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ int cpu; /* cpu */
+ };
+
+@@ -109,6 +110,7 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+ #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
++#define TIF_NEED_RESCHED_LAZY 4
+ #define TIF_NOHZ 7
+ #define TIF_SYSCALL_TRACE 8
+ #define TIF_SYSCALL_AUDIT 9
+@@ -124,6 +126,7 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+ #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_NOHZ (1 << TIF_NOHZ)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+@@ -132,7 +135,8 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_32BIT (1 << TIF_32BIT)
+
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
++ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
++ _TIF_NEED_RESCHED_LAZY)
+
+ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index 05070b72fc28..acfeddb1283a 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -37,6 +37,7 @@ int main(void)
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 441420ca7d08..404792bdca99 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -434,11 +434,16 @@ ENDPROC(el1_sync)
+
+ #ifdef CONFIG_PREEMPT
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+- cbnz w24, 1f // preempt count != 0
++ cbnz w24, 2f // preempt count != 0
+ ldr x0, [tsk, #TI_FLAGS] // get flags
+- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
+- bl el1_preempt
++ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
++
++ ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count
++ cbnz w24, 2f // preempt lazy count != 0
++ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
+ 1:
++ bl el1_preempt
++2:
+ #endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_on
+@@ -452,6 +457,7 @@ ENDPROC(el1_irq)
+ 1: bl preempt_schedule_irq // irq en/disable is done inside
+ ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
+ tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
++ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
+ ret x24
+ #endif
+
+@@ -708,6 +714,7 @@ ENDPROC(cpu_switch_to)
+ */
+ work_pending:
+ tbnz x1, #TIF_NEED_RESCHED, work_resched
++ tbnz x1, #TIF_NEED_RESCHED_LAZY, work_resched
+ /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
+ mov x0, sp // 'regs'
+ enable_irq // enable interrupts for do_notify_resume()
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 212ff92920d2..71ad38d3d76b 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2480,7 +2480,7 @@ config MIPS_ASID_BITS_VARIABLE
+ #
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
+
+ config CPU_SUPPORTS_HIGHMEM
+ bool
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 792cb1768c8f..ddf5a0fdb25a 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -57,10 +57,11 @@ config LOCKDEP_SUPPORT
+
+ config RWSEM_GENERIC_SPINLOCK
+ bool
++ default y if PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
+
+ config GENERIC_LOCKBREAK
+ bool
+@@ -140,6 +141,7 @@ config PPC
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
++ select HAVE_PREEMPT_LAZY
+ select HAVE_MOD_ARCH_SPECIFIC
+ select MODULES_USE_ELF_RELA
+ select CLONE_BACKWARDS
+@@ -326,7 +328,7 @@ menu "Kernel options"
+
+ config HIGHMEM
+ bool "High memory support"
+- depends on PPC32
++ depends on PPC32 && !PREEMPT_RT_FULL
+
+ source kernel/Kconfig.hz
+ source kernel/Kconfig.preempt
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 87e4b2d8dcd4..981e501a4359 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -43,6 +43,8 @@ struct thread_info {
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => preemptable,
++ <0 => BUG */
+ unsigned long local_flags; /* private flags for thread */
+ #ifdef CONFIG_LIVEPATCH
+ unsigned long *livepatch_sp;
+@@ -88,8 +90,7 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+ #define TIF_SIGPENDING 1 /* signal pending */
+ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+- TIF_NEED_RESCHED */
++#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
+ #define TIF_32BIT 4 /* 32 bit binary */
+ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+@@ -107,6 +108,8 @@ static inline struct thread_info *current_thread_info(void)
+ #if defined(CONFIG_PPC64)
+ #define TIF_ELF2ABI 18 /* function descriptors must die! */
+ #endif
++#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling
++ TIF_NEED_RESCHED */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+@@ -125,14 +128,16 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ (1<<TIF_NOHZ)
++#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+- _TIF_RESTORE_TM)
++ _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
+ #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index b89d14c0352c..81ae8f4c88f6 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -156,6 +156,7 @@ int main(void)
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 9899032230b4..f95b93f46c47 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -835,7 +835,14 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore
+ andi. r8,r8,_TIF_NEED_RESCHED
++ bne+ 1f
++ lwz r0,TI_PREEMPT_LAZY(r9)
++ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
++ bne restore
++ lwz r0,TI_FLAGS(r9)
++ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++1:
+ lwz r3,_MSR(r1)
+ andi. r0,r3,MSR_EE /* interrupts off? */
+ beq restore /* don't schedule if so */
+@@ -846,11 +853,11 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
+ */
+ bl trace_hardirqs_off
+ #endif
+-1: bl preempt_schedule_irq
++2: bl preempt_schedule_irq
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r3,TI_FLAGS(r9)
+- andi. r0,r3,_TIF_NEED_RESCHED
+- bne- 1b
++ andi. r0,r3,_TIF_NEED_RESCHED_MASK
++ bne- 2b
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+@@ -1171,7 +1178,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+ do_work: /* r10 contains MSR_KERNEL here */
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ beq do_user_signal
+
+ do_resched: /* r10 contains MSR_KERNEL here */
+@@ -1192,7 +1199,7 @@ do_resched: /* r10 contains MSR_KERNEL here */
+ MTMSRD(r10) /* disable interrupts */
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r9,TI_FLAGS(r9)
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ bne- do_resched
+ andi. r0,r9,_TIF_USER_WORK_MASK
+ beq restore_user
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 5afd03e5e8b8..f5d4c2a033ef 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -657,7 +657,7 @@ _GLOBAL(ret_from_except_lite)
+ bl restore_math
+ b restore
+ #endif
+-1: andi. r0,r4,_TIF_NEED_RESCHED
++1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ beq 2f
+ bl restore_interrupts
+ SCHEDULE_USER
+@@ -719,10 +719,18 @@ _GLOBAL(ret_from_except_lite)
+
+ #ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
+- andi. r0,r4,_TIF_NEED_RESCHED
+- beq+ restore
+- /* Check that preempt_count() == 0 and interrupts are enabled */
+ lwz r8,TI_PREEMPT(r9)
++ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
++ bne restore
++ andi. r0,r4,_TIF_NEED_RESCHED
++ bne+ check_count
++
++ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
++ beq+ restore
++ lwz r8,TI_PREEMPT_LAZY(r9)
++
++ /* Check that preempt_count() == 0 and interrupts are enabled */
++check_count:
+ cmpwi cr1,r8,0
+ ld r0,SOFTE(r1)
+ cmpdi r0,0
+@@ -739,7 +747,7 @@ _GLOBAL(ret_from_except_lite)
+ /* Re-test flags and eventually loop */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r4,TI_FLAGS(r9)
+- andi. r0,r4,_TIF_NEED_RESCHED
++ andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ bne 1b
+
+ /*
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 08887cf2b20e..f1770ea2d094 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -633,6 +633,7 @@ void irq_ctx_init(void)
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curtp, *irqtp;
+@@ -650,6 +651,7 @@ void do_softirq_own_stack(void)
+ if (irqtp->flags)
+ set_bits(irqtp->flags, &curtp->flags);
+ }
++#endif
+
+ irq_hw_number_t virq_to_hw(unsigned int virq)
+ {
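
This #ifndef pattern recurs below for powerpc, sh, sparc and x86: on PREEMPT_RT softirqs are not run from hard-interrupt return on a dedicated IRQ stack but in task context, so each arch's do_softirq_own_stack() stack-switching helper is compiled out and the generic code stays on the current stack. A stub-based sketch of the dispatch shape this selects; all names here are illustrative stand-ins:

static void __do_softirq_stub(void)
{
        /* run pending softirqs */
}

static void do_softirq_own_stack_stub(void)
{
        __do_softirq_stub();    /* ...after switching to the per-CPU IRQ stack */
}

static void invoke_softirq_sketch(void)
{
#ifndef CONFIG_PREEMPT_RT_FULL
        do_softirq_own_stack_stub();    /* mainline: borrow the IRQ stack */
#else
        __do_softirq_stub();            /* RT: task context, task stack */
#endif
}
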
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
+index d9c912b6e632..7b2e997a5083 100644
+--- a/arch/powerpc/kernel/misc_32.S
++++ b/arch/powerpc/kernel/misc_32.S
+@@ -40,6 +40,7 @@
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ stw r0,4(r1)
+@@ -56,6 +57,7 @@ _GLOBAL(call_do_softirq)
+ stw r10,THREAD+KSP_LIMIT(r2)
+ mtlr r0
+ blr
++#endif
+
+ /*
+ * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index cb195157b318..c919a2bfd0ca 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -30,6 +30,7 @@
+
+ .text
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ std r0,16(r1)
+@@ -40,6 +41,7 @@ _GLOBAL(call_do_softirq)
+ ld r0,16(r1)
+ mtlr r0
+ blr
++#endif
+
+ _GLOBAL(call_do_irq)
+ mflr r0
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
+index c2024ac9d4e8..2303788da7e1 100644
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -172,6 +172,7 @@ config KVM_E500MC
+ config KVM_MPIC
+ bool "KVM in-kernel MPIC emulation"
+ depends on KVM && E500
++ depends on !PREEMPT_RT_FULL
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQFD
+ select HAVE_KVM_IRQ_ROUTING
+diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
+index 57caaf11a83f..030c9bfe52e3 100644
+--- a/arch/powerpc/platforms/ps3/device-init.c
++++ b/arch/powerpc/platforms/ps3/device-init.c
+@@ -752,7 +752,7 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
+ }
+ pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
+
+- res = wait_event_interruptible(dev->done.wait,
++ res = swait_event_interruptible(dev->done.wait,
+ dev->done.done || kthread_should_stop());
+ if (kthread_should_stop())
+ res = -EINTR;
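
The wait_event_interruptible() call becomes swait_event_interruptible() here because this RT series rebases struct completion onto "simple waitqueues", whose wakeup path does bounded work under a raw lock. A minimal kernel-style sketch of the simple-waitqueue API as it exists in this kernel era; the queue, flag and function names are illustrative:

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
static int demo_done;

/* waiter side: sleeps until demo_done becomes non-zero */
static int demo_wait(void)
{
        return swait_event_interruptible(demo_wq, demo_done);
}

/* wakeup side: safe even from hard-IRQ context */
static void demo_complete(void)
{
        demo_done = 1;
        swake_up(&demo_wq);
}
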
+diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
+index 6c0378c0b8b5..abd58b4dff97 100644
+--- a/arch/sh/kernel/irq.c
++++ b/arch/sh/kernel/irq.c
+@@ -147,6 +147,7 @@ void irq_ctx_exit(int cpu)
+ hardirq_ctx[cpu] = NULL;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curctx;
+@@ -174,6 +175,7 @@ void do_softirq_own_stack(void)
+ "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+ );
+ }
++#endif
+ #else
+ static inline void handle_one_irq(unsigned int irq)
+ {
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 59b09600dd32..1b073eb3dc2a 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -187,12 +187,10 @@ config NR_CPUS
+ source kernel/Kconfig.hz
+
+ config RWSEM_GENERIC_SPINLOCK
+- bool
+- default y if SPARC32
++ def_bool PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+- bool
+- default y if SPARC64
++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+
+ config GENERIC_HWEIGHT
+ bool
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
+index 34a7930b76ef..773740521008 100644
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
+ set_irq_regs(old_regs);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+@@ -868,6 +869,7 @@ void do_softirq_own_stack(void)
+ __asm__ __volatile__("mov %0, %%sp"
+ : : "r" (orig_sp));
+ }
++#endif
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ void fixup_irqs(void)
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 2a1f0ce7c59a..bd4ab87efb31 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -17,6 +17,7 @@ config X86_64
+ ### Arch settings
+ config X86
+ def_bool y
++ select HAVE_PREEMPT_LAZY
+ select ACPI_LEGACY_TABLES_LOOKUP if ACPI
+ select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+ select ANON_INODES
+@@ -231,8 +232,11 @@ config ARCH_MAY_HAVE_PC_FDC
+ def_bool y
+ depends on ISA_DMA_API
+
++config RWSEM_GENERIC_SPINLOCK
++ def_bool PREEMPT_RT_FULL
++
+ config RWSEM_XCHGADD_ALGORITHM
+- def_bool y
++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+
+ config GENERIC_CALIBRATE_DELAY
+ def_bool y
+@@ -885,7 +889,7 @@ config IOMMU_HELPER
+ config MAXSMP
+ bool "Enable Maximum number of SMP Processors and NUMA Nodes"
+ depends on X86_64 && SMP && DEBUG_KERNEL
+- select CPUMASK_OFFSTACK
++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
+ ---help---
+ Enable maximum number of CPUS and NUMA Nodes for this architecture.
+ If unsure, say N.
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index 0ab5ee1c26af..fff8f6f1f90c 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+- nbytes & AES_BLOCK_MASK);
++ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
++ kernel_fpu_begin();
+ aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+ if (walk.nbytes) {
++ kernel_fpu_begin();
+ ctr_crypt_final(ctx, &walk);
++ kernel_fpu_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
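
The aesni, cast5 and glue_helper hunks in this stretch repeat one transformation: kernel_fpu_begin()/kernel_fpu_end(), which disable preemption while the FPU is in kernel use, move inside the walk loop, so preemption is only held off for one block batch at a time instead of across the whole request. On RT this bounds the preempt-off region; the cost is an FPU save/restore per iteration. A stub-based C sketch of the resulting loop shape; the helpers are stand-ins, not kernel API:

/* Stand-ins for the kernel primitives, for illustration only. */
static void fpu_begin_stub(void) { /* preempt_disable(); save FPU state */ }
static void fpu_end_stub(void)   { /* restore FPU state; preempt_enable() */ }
static unsigned int next_chunk_stub(void)
{
        static unsigned int n = 4;      /* pretend 4 chunks remain */
        return n ? n-- : 0;
}
static void process_chunk_stub(unsigned int n) { (void)n; }

static void encrypt_walk_sketch(void)
{
        unsigned int nbytes;

        /* after the patch: one short FPU section per chunk */
        while ((nbytes = next_chunk_stub())) {
                fpu_begin_stub();
                process_chunk_stub(nbytes);
                fpu_end_stub();         /* preemption possible between chunks */
        }
}
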
+diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
+index 8648158f3916..d7699130ee36 100644
+--- a/arch/x86/crypto/cast5_avx_glue.c
++++ b/arch/x86/crypto/cast5_avx_glue.c
+@@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fpu_enabled)
+ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ bool enc)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ const unsigned int bsize = CAST5_BLOCK_SIZE;
+ unsigned int nbytes;
+@@ -75,7 +75,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ u8 *wsrc = walk->src.virt.addr;
+ u8 *wdst = walk->dst.virt.addr;
+
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+
+ /* Process multi-block batch */
+ if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
+@@ -103,10 +103,9 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ } while (nbytes >= bsize);
+
+ done:
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, walk, nbytes);
+ }
+-
+- cast5_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -227,7 +226,7 @@ static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
+ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -236,12 +235,11 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ while ((nbytes = walk.nbytes)) {
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+ nbytes = __cbc_decrypt(desc, &walk);
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+-
+- cast5_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -311,7 +309,7 @@ static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
+ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+ nbytes = __ctr_crypt(desc, &walk);
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- cast5_fpu_end(fpu_enabled);
+-
+ if (walk.nbytes) {
+ ctr_crypt_final(desc, &walk);
+ err = blkcipher_walk_done(desc, &walk, 0);
+diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
+index 6a85598931b5..3a506ce7ed93 100644
+--- a/arch/x86/crypto/glue_helper.c
++++ b/arch/x86/crypto/glue_helper.c
+@@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+ void *ctx = crypto_blkcipher_ctx(desc->tfm);
+ const unsigned int bsize = 128 / 8;
+ unsigned int nbytes, i, func_bytes;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ int err;
+
+ err = blkcipher_walk_virt(desc, walk);
+@@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+ u8 *wdst = walk->dst.virt.addr;
+
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+
+ for (i = 0; i < gctx->num_funcs; i++) {
+ func_bytes = bsize * gctx->funcs[i].num_blocks;
+@@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+ }
+
+ done:
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+
+ while ((nbytes = walk.nbytes)) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+ nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
+@@ -277,7 +277,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -286,13 +286,12 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+
+ while ((nbytes = walk.nbytes) >= bsize) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+ nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+-
+ if (walk.nbytes) {
+ glue_ctr_crypt_final_128bit(
+ gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
+@@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+ void *tweak_ctx, void *crypt_ctx)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+
+ /* set minimum length to bsize, for tweak_fn */
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled,
++ desc, false,
+ nbytes < bsize ? bsize : nbytes);
+-
+ /* calculate first value of T */
+ tweak_fn(tweak_ctx, walk.iv, walk.iv);
++ glue_fpu_end(fpu_enabled);
+
+ while (nbytes) {
++ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
++ desc, false, nbytes);
+ nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
+
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = walk.nbytes;
+ }
+-
+- glue_fpu_end(fpu_enabled);
+-
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 1433f6b4607d..f963fde8e4fa 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -136,7 +136,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
+
+ #define EXIT_TO_USERMODE_LOOP_FLAGS \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
++ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY)
+
+ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+ {
+@@ -152,9 +152,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+ /* We have work to do. */
+ local_irq_enable();
+
+- if (cached_flags & _TIF_NEED_RESCHED)
++ if (cached_flags & _TIF_NEED_RESCHED_MASK)
+ schedule();
+
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (unlikely(current->forced_info.si_signo)) {
++ struct task_struct *t = current;
++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
++ t->forced_info.si_signo = 0;
++ }
++#endif
+ if (cached_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 0b56666e6039..1d8ee026c9c5 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -271,8 +271,25 @@ END(ret_from_exception)
+ ENTRY(resume_kernel)
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ need_resched:
++ # preempt count == 0 + NEED_RS set?
+ cmpl $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz restore_all
++#else
++ jz test_int_off
++
++	# at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jne restore_all
++
++ GET_THREAD_INFO(%ebp)
++ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
++ jnz restore_all
++
++ testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
++ jz restore_all
++test_int_off:
++#endif
+ testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ jz restore_all
+ call preempt_schedule_irq
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 02fff3ebfb87..81ec3d016df0 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -512,7 +512,23 @@ GLOBAL(retint_user)
+ bt $9, EFLAGS(%rsp) /* were interrupts off? */
+ jnc 1f
+ 0: cmpl $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz 1f
++#else
++ jz do_preempt_schedule_irq
++
++	# at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jnz 1f
++
++ GET_THREAD_INFO(%rcx)
++ cmpl $0, TI_preempt_lazy_count(%rcx)
++ jnz 1f
++
++ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
++ jnc 1f
++do_preempt_schedule_irq:
++#endif
+ call preempt_schedule_irq
+ jmp 0b
+ 1:
+@@ -817,6 +833,7 @@ END(native_load_gs_index)
+ jmp 2b
+ .previous
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(do_softirq_own_stack)
+ pushq %rbp
+@@ -829,6 +846,7 @@ ENTRY(do_softirq_own_stack)
+ decl PER_CPU_VAR(irq_count)
+ ret
+ END(do_softirq_own_stack)
++#endif
+
+ #ifdef CONFIG_XEN
+ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index 17f218645701..11bd1b7ee6eb 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -79,17 +79,46 @@ static __always_inline void __preempt_count_sub(int val)
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
+-static __always_inline bool __preempt_count_dec_and_test(void)
++static __always_inline bool ____preempt_count_dec_and_test(void)
+ {
+ GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+ }
+
++static __always_inline bool __preempt_count_dec_and_test(void)
++{
++ if (____preempt_count_dec_and_test())
++ return true;
++#ifdef CONFIG_PREEMPT_LAZY
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++ return false;
++#endif
++}
++
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+ static __always_inline bool should_resched(int preempt_offset)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++ u32 tmp;
++
++ tmp = raw_cpu_read_4(__preempt_count);
++ if (tmp == preempt_offset)
++ return true;
++
++ /* preempt count == 0 ? */
++ tmp &= ~PREEMPT_NEED_RESCHED;
++ if (tmp)
++ return false;
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
++#endif
+ }
+
+ #ifdef CONFIG_PREEMPT
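
The lazy-preemption scheme threaded through arm64, powerpc and x86 in this patch adds a second, weaker resched request: TIF_NEED_RESCHED_LAZY only triggers preemption when both the ordinary preempt count and the new preempt_lazy_count are zero, so SCHED_OTHER tasks stop preempting each other inside "lazy" sections while real-time wakeups still use TIF_NEED_RESCHED and preempt immediately. A compile-ready C sketch of the should_resched()-style test above, with stand-ins for the per-CPU and per-thread state; all names are illustrative:

#include <stdbool.h>

/* Stand-ins for per-CPU/per-thread kernel state */
static int  preempt_count_stub;
static int  preempt_lazy_count_stub;
static bool tif_need_resched_stub;       /* TIF_NEED_RESCHED */
static bool tif_need_resched_lazy_stub;  /* TIF_NEED_RESCHED_LAZY */

static bool should_preempt_sketch(void)
{
        /* hard request: fires as soon as the preempt count is 0 */
        if (preempt_count_stub == 0 && tif_need_resched_stub)
                return true;
        /* lazy request: additionally gated by the lazy nesting count */
        if (preempt_count_stub == 0 && preempt_lazy_count_stub == 0 &&
            tif_need_resched_lazy_stub)
                return true;
        return false;
}
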
+diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
+index dd1e7d6387ab..d59bedb28bab 100644
+--- a/arch/x86/include/asm/signal.h
++++ b/arch/x86/include/asm/signal.h
+@@ -23,6 +23,19 @@ typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+ } sigset_t;
+
++/*
++ * Because some traps use the IST stack, we must keep preemption
++ * disabled while calling do_trap(), but do_trap() may call
++ * force_sig_info() which will grab the signal spin_locks for the
++ * task, which in PREEMPT_RT_FULL are mutexes. By defining
++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
++ * trap.
++ */
++#if defined(CONFIG_PREEMPT_RT_FULL)
++#define ARCH_RT_DELAYS_SIGNAL_SEND
++#endif
++
+ #ifndef CONFIG_COMPAT
+ typedef sigset_t compat_sigset_t;
+ #endif
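
The comment above describes the mechanism that the earlier exit_to_usermode_loop() hunk consumes: when force_sig_info() is reached from an IST trap with preemption disabled, RT cannot take the now mutex-based sighand lock, so the siginfo is parked in the task and delivered on the next return to user mode. A stub-based sketch of that park-and-replay pattern; the types and helper names are stand-ins (the real patch uses current->forced_info):

#include <stdbool.h>

struct siginfo_stub { int si_signo; };
struct task_stub    { struct siginfo_stub forced_info; };

/* trap context: cannot take sleeping locks, so just record the signal */
static void force_sig_delayed_sketch(struct task_stub *t, int signo)
{
        t->forced_info.si_signo = signo;
        /* also set a TIF work flag so the exit path runs the code below */
}

/* exit-to-user path: safe context, actually deliver and clear */
static void deliver_delayed_sketch(struct task_stub *t)
{
        if (t->forced_info.si_signo) {
                /* force_sig_info(t->forced_info.si_signo, ...) in the kernel */
                t->forced_info.si_signo = 0;
        }
}
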
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 58505f01962f..02fa39652cd6 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -59,7 +59,7 @@
+ */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+- u64 canary;
++ u64 uninitialized_var(canary);
+ u64 tsc;
+
+ #ifdef CONFIG_X86_64
+@@ -70,8 +70,15 @@ static __always_inline void boot_init_stack_canary(void)
+ * of randomness. The TSC only matters for very early init,
+ * there it already has some randomness on most systems. Later
+ * on during the bootup the random pool has true entropy too.
++ *
++ * For preempt-rt we need to weaken the randomness a bit, as
++ * we can't call into the random generator from atomic context
++ * due to locking constraints. We just leave canary
++ * uninitialized and use the TSC based randomness on top of it.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ get_random_bytes(&canary, sizeof(canary));
++#endif
+ tsc = rdtsc();
+ canary += tsc + (tsc << 32UL);
+
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index 8b7c8d8e0852..631059ef61da 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -57,6 +57,8 @@ struct thread_info {
+ __u32 flags; /* low level flags */
+ __u32 status; /* thread synchronous flags */
+ __u32 cpu; /* current CPU */
++ int preempt_lazy_count; /* 0 => lazy preemptable
++ <0 => BUG */
+ };
+
+ #define INIT_THREAD_INFO(tsk) \
+@@ -73,6 +75,10 @@ struct thread_info {
+
+ #include <asm/asm-offsets.h>
+
++#define GET_THREAD_INFO(reg) \
++ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
++ _ASM_SUB $(THREAD_SIZE),reg ;
++
+ #endif
+
+ /*
+@@ -91,6 +97,7 @@ struct thread_info {
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
+ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+ #define TIF_UPROBE 12 /* breakpointed or singlestepping */
+ #define TIF_NOTSC 16 /* TSC is not accessible in userland */
+@@ -115,6 +122,7 @@ struct thread_info {
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_NOTSC (1 << TIF_NOTSC)
+@@ -151,6 +159,8 @@ struct thread_info {
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
++
+ #define STACK_WARN (THREAD_SIZE/8)
+
+ /*
+diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
+index cc44d926c17e..df278aa0f638 100644
+--- a/arch/x86/include/asm/uv/uv_bau.h
++++ b/arch/x86/include/asm/uv/uv_bau.h
+@@ -615,9 +615,9 @@ struct bau_control {
+ cycles_t send_message;
+ cycles_t period_end;
+ cycles_t period_time;
+- spinlock_t uvhub_lock;
+- spinlock_t queue_lock;
+- spinlock_t disable_lock;
++ raw_spinlock_t uvhub_lock;
++ raw_spinlock_t queue_lock;
++ raw_spinlock_t disable_lock;
+ /* tunables */
+ int max_concurr;
+ int max_concurr_const;
+@@ -776,15 +776,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
+ * to be lowered below the current 'v'. atomic_add_unless can only stop
+ * on equal.
+ */
+-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
++static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
+ {
+- spin_lock(lock);
++ raw_spin_lock(lock);
+ if (atomic_read(v) >= u) {
+- spin_unlock(lock);
++ raw_spin_unlock(lock);
+ return 0;
+ }
+ atomic_inc(v);
+- spin_unlock(lock);
++ raw_spin_unlock(lock);
+ return 1;
+ }
+
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index fbd19444403f..e78f477a4ae3 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -87,7 +87,9 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
+ * ->ioapic_mutex
+ * ->ioapic_lock
+ */
++#ifdef CONFIG_X86_IO_APIC
+ static DEFINE_MUTEX(acpi_ioapic_lock);
++#endif
+
+ /* --------------------------------------------------------------------------
+ Boot-time Configuration
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 48e6d84f173e..0b5a8b994f65 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
+ static inline bool ioapic_irqd_mask(struct irq_data *data)
+ {
+ /* If we are moving the irq we need to mask it */
+- if (unlikely(irqd_is_setaffinity_pending(data))) {
++ if (unlikely(irqd_is_setaffinity_pending(data) &&
++ !irqd_irq_inprogress(data))) {
+ mask_ioapic_irq(data);
+ return true;
+ }
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 2bd5c6ff7ee7..a2c317f5839b 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -31,6 +31,7 @@ void common(void) {
+ BLANK();
+ OFFSET(TI_flags, thread_info, flags);
+ OFFSET(TI_status, thread_info, status);
++ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
+
+ BLANK();
+ OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
+@@ -88,4 +89,5 @@ void common(void) {
+
+ BLANK();
+ DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
++ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
+ }
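
The TI_preempt_lazy_count and _PREEMPT_ENABLED constants added here feed the assembly in entry_32.S/entry_64.S: asm-offsets.c is compiled to assembly only, and Kbuild scrapes the emitted "->NAME value" markers into asm-offsets.h so .S files can address C struct members by numeric offset. A self-contained sketch of the mechanism; the struct and symbol names are illustrative:

#include <stddef.h>

struct thread_info_sketch {
        unsigned long flags;
        int preempt_lazy_count;
};

/* Emits "->SYM <value>" into the generated assembly; a Kbuild script
 * turns these markers into #define lines in asm-offsets.h. */
#define DEFINE_SKETCH(sym, val) \
        asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))

void asm_offsets_sketch(void)
{
        DEFINE_SKETCH(TI_preempt_lazy_count_sketch,
                      offsetof(struct thread_info_sketch, preempt_lazy_count));
}
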
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 79d8ec849468..accbf0e806d0 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -41,6 +41,8 @@
+ #include <linux/debugfs.h>
+ #include <linux/irq_work.h>
+ #include <linux/export.h>
++#include <linux/jiffies.h>
++#include <linux/swork.h>
+
+ #include <asm/processor.h>
+ #include <asm/traps.h>
+@@ -1291,7 +1293,7 @@ void mce_log_therm_throt_event(__u64 status)
+ static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
+
+ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
+-static DEFINE_PER_CPU(struct timer_list, mce_timer);
++static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+
+ static unsigned long mce_adjust_timer_default(unsigned long interval)
+ {
+@@ -1300,32 +1302,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
+
+ static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
+
+-static void __restart_timer(struct timer_list *t, unsigned long interval)
++static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval)
+ {
+- unsigned long when = jiffies + interval;
+- unsigned long flags;
+-
+- local_irq_save(flags);
+-
+- if (timer_pending(t)) {
+- if (time_before(when, t->expires))
+- mod_timer(t, when);
+- } else {
+- t->expires = round_jiffies(when);
+- add_timer_on(t, smp_processor_id());
+- }
+-
+- local_irq_restore(flags);
++ if (!interval)
++ return HRTIMER_NORESTART;
++ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval)));
++ return HRTIMER_RESTART;
+ }
+
+-static void mce_timer_fn(unsigned long data)
++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
+- int cpu = smp_processor_id();
+ unsigned long iv;
+
+- WARN_ON(cpu != data);
+-
+ iv = __this_cpu_read(mce_next_interval);
+
+ if (mce_available(this_cpu_ptr(&cpu_info))) {
+@@ -1348,7 +1336,7 @@ static void mce_timer_fn(unsigned long data)
+
+ done:
+ __this_cpu_write(mce_next_interval, iv);
+- __restart_timer(t, iv);
++ return __restart_timer(timer, iv);
+ }
+
+ /*
+@@ -1356,7 +1344,7 @@ static void mce_timer_fn(unsigned long data)
+ */
+ void mce_timer_kick(unsigned long interval)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ unsigned long iv = __this_cpu_read(mce_next_interval);
+
+ __restart_timer(t, interval);
+@@ -1371,7 +1359,7 @@ static void mce_timer_delete_all(void)
+ int cpu;
+
+ for_each_online_cpu(cpu)
+- del_timer_sync(&per_cpu(mce_timer, cpu));
++ hrtimer_cancel(&per_cpu(mce_timer, cpu));
+ }
+
+ static void mce_do_trigger(struct work_struct *work)
+@@ -1381,6 +1369,56 @@ static void mce_do_trigger(struct work_struct *work)
+
+ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+
++static void __mce_notify_work(struct swork_event *event)
++{
++ /* Not more than two messages every minute */
++ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
++
++ /* wake processes polling /dev/mcelog */
++ wake_up_interruptible(&mce_chrdev_wait);
++
++ /*
++ * There is no risk of missing notifications because
++ * work_pending is always cleared before the function is
++ * executed.
++ */
++ if (mce_helper[0] && !work_pending(&mce_trigger_work))
++ schedule_work(&mce_trigger_work);
++
++ if (__ratelimit(&ratelimit))
++ pr_info(HW_ERR "Machine check events logged\n");
++}
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++static bool notify_work_ready __read_mostly;
++static struct swork_event notify_work;
++
++static int mce_notify_work_init(void)
++{
++ int err;
++
++ err = swork_get();
++ if (err)
++ return err;
++
++ INIT_SWORK(&notify_work, __mce_notify_work);
++ notify_work_ready = true;
++ return 0;
++}
++
++static void mce_notify_work(void)
++{
++ if (notify_work_ready)
++ swork_queue(&notify_work);
++}
++#else
++static void mce_notify_work(void)
++{
++ __mce_notify_work(NULL);
++}
++static inline int mce_notify_work_init(void) { return 0; }
++#endif
++
+ /*
+ * Notify the user(s) about new machine check events.
+ * Can be called from interrupt context, but not from machine check/NMI
+@@ -1388,19 +1426,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+ */
+ int mce_notify_irq(void)
+ {
+- /* Not more than two messages every minute */
+- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+-
+ if (test_and_clear_bit(0, &mce_need_notify)) {
+- /* wake processes polling /dev/mcelog */
+- wake_up_interruptible(&mce_chrdev_wait);
+-
+- if (mce_helper[0])
+- schedule_work(&mce_trigger_work);
+-
+- if (__ratelimit(&ratelimit))
+- pr_info(HW_ERR "Machine check events logged\n");
+-
++ mce_notify_work();
+ return 1;
+ }
+ return 0;
+@@ -1717,7 +1744,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
+ }
+ }
+
+-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
++static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
+ {
+ unsigned long iv = check_interval * HZ;
+
+@@ -1726,16 +1753,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+
+ per_cpu(mce_next_interval, cpu) = iv;
+
+- t->expires = round_jiffies(jiffies + iv);
+- add_timer_on(t, cpu);
++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
++ 0, HRTIMER_MODE_REL_PINNED);
+ }
+
+ static void __mcheck_cpu_init_timer(void)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ unsigned int cpu = smp_processor_id();
+
+- setup_pinned_timer(t, mce_timer_fn, cpu);
++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ t->function = mce_timer_fn;
+ mce_start_timer(cpu, t);
+ }
+
+@@ -2459,6 +2487,8 @@ static void mce_disable_cpu(void *h)
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
+ return;
+
++ hrtimer_cancel(this_cpu_ptr(&mce_timer));
++
+ if (!(action & CPU_TASKS_FROZEN))
+ cmci_clear();
+
+@@ -2481,6 +2511,7 @@ static void mce_reenable_cpu(void *h)
+ if (b->init)
+ wrmsrl(msr_ops.ctl(i), b->ctl);
+ }
++ __mcheck_cpu_init_timer();
+ }
+
+ /* Get notified when a cpu comes on/off. Be hotplug friendly. */
+@@ -2488,7 +2519,6 @@ static int
+ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ {
+ unsigned int cpu = (unsigned long)hcpu;
+- struct timer_list *t = &per_cpu(mce_timer, cpu);
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+@@ -2508,11 +2538,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ break;
+ case CPU_DOWN_PREPARE:
+ smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+- del_timer_sync(t);
+ break;
+ case CPU_DOWN_FAILED:
+ smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+- mce_start_timer(cpu, t);
+ break;
+ }
+
+@@ -2551,6 +2579,10 @@ static __init int mcheck_init_device(void)
+ goto err_out;
+ }
+
++ err = mce_notify_work_init();
++ if (err)
++ goto err_out;
++
+ if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
+ err = -ENOMEM;
+ goto err_out;
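
Replacing the per-CPU timer_list with an hrtimer lets the MCE poller re-arm itself from its own callback via hrtimer_forward_now() instead of re-adding a jiffies timer, while the heavy notification work is deferred to swork above so the timer callback stays short. A minimal sketch of that periodic-hrtimer pattern using the standard API of this kernel era; the interval and names are illustrative:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
        /* lightweight work only; defer anything heavy elsewhere */
        hrtimer_forward_now(t, ms_to_ktime(500));  /* re-arm 500 ms out */
        return HRTIMER_RESTART;
}

static void demo_timer_start(void)
{
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_timer_fn;
        hrtimer_start(&demo_timer, ms_to_ktime(500), HRTIMER_MODE_REL);
}
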
+diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
+index 09675712eba8..eea7557b355d 100644
+--- a/arch/x86/kernel/dumpstack_32.c
++++ b/arch/x86/kernel/dumpstack_32.c
+@@ -42,7 +42,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data)
+ {
+- const unsigned cpu = get_cpu();
++ const unsigned cpu = get_cpu_light();
+ int graph = 0;
+ u32 *prev_esp;
+
+@@ -84,7 +84,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ break;
+ touch_nmi_watchdog();
+ }
+- put_cpu();
++ put_cpu_light();
+ }
+ EXPORT_SYMBOL(dump_trace);
+
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index 9ee4520ce83c..2cd610b68868 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data)
+ {
+- const unsigned cpu = get_cpu();
++ const unsigned cpu = get_cpu_light();
+ unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+ unsigned long dummy;
+ unsigned used = 0;
+@@ -239,7 +239,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ * This handles the process stack:
+ */
+ bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
+- put_cpu();
++ put_cpu_light();
+ }
+ EXPORT_SYMBOL(dump_trace);
+
+@@ -253,7 +253,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ int cpu;
+ int i;
+
+- preempt_disable();
++ migrate_disable();
+ cpu = smp_processor_id();
+
+ irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
+@@ -299,7 +299,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ stack++;
+ touch_nmi_watchdog();
+ }
+- preempt_enable();
++ migrate_enable();
+
+ pr_cont("\n");
+ show_trace_log_lvl(task, regs, sp, bp, log_lvl);
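
get_cpu_light() and migrate_disable() are RT-specific relaxations defined elsewhere in this series: the stack dumpers only need to stay on one CPU (so the per-CPU irq_stack_ptr stays valid), not to run with preemption off, so on RT they pin the task to its CPU while leaving it preemptible; on !RT builds the helpers fall back to get_cpu()/preempt_disable(). A stub sketch of the intended semantics; the _sketch names are stand-ins, and the real helpers come from the RT patch, not mainline:

/* Stand-ins sketching the RT semantics of get_cpu_light(). */
#ifdef CONFIG_PREEMPT_RT_FULL
#define get_cpu_light_sketch()  ({ migrate_disable(); smp_processor_id(); })
#define put_cpu_light_sketch()  migrate_enable()
#else
#define get_cpu_light_sketch()  get_cpu()      /* preempt_disable() + CPU id */
#define put_cpu_light_sketch()  put_cpu()
#endif
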
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index 1f38d9a4d9de..053bf3b2ef39 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -127,6 +127,7 @@ void irq_ctx_init(int cpu)
+ cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct irq_stack *irqstk;
+@@ -143,6 +144,7 @@ void do_softirq_own_stack(void)
+
+ call_on_stack(__do_softirq, isp);
+ }
++#endif
+
+ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+ {
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index d86be29c38c7..b0e29d1a0571 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -35,6 +35,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
+ #include <linux/kdebug.h>
++#include <linux/highmem.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/ldt.h>
+@@ -210,6 +211,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+ }
+ EXPORT_SYMBOL_GPL(start_thread);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ int i;
++
++ /*
++ * Clear @prev's kmap_atomic mappings
++ */
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++ pte_t *ptep = kmap_pte - idx;
++
++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
++ }
++ /*
++ * Restore @next_p's kmap_atomic mappings
++ */
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ if (!pte_none(next_p->kmap_pte[i]))
++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++ }
++}
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+
+ /*
+ * switch_to(x,y) should switch tasks from x to y.
+@@ -286,6 +316,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ __switch_to_xtra(prev_p, next_p, tss);
+
++ switch_kmaps(prev_p, next_p);
++
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index b62c85229711..d907b281a9d6 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1938,6 +1938,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
+ hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_PINNED);
+ apic->lapic_timer.timer.function = apic_timer_fn;
++ apic->lapic_timer.timer.irqsafe = 1;
+
+ /*
+ * APIC is created enabled. This will prevent kvm_lapic_set_base from
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 699f8726539a..24f30c86510c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5865,6 +5865,13 @@ int kvm_arch_init(void *opaque)
+ goto out;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
++ return -EOPNOTSUPP;
++ }
++#endif
++
+ r = kvm_mmu_module_init();
+ if (r)
+ goto out_free_percpu;
+diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
+index 6d18b70ed5a9..f752724c22e8 100644
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
+ */
+ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
++ pte_t pte = mk_pte(page, prot);
+ unsigned long vaddr;
+ int idx, type;
+
+- preempt_disable();
++ preempt_disable_nort();
+ pagefault_disable();
+
+ if (!PageHighMem(page))
+@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
+- set_pte(kmap_pte-idx, mk_pte(page, prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_pte(kmap_pte-idx, pte);
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
+ * is a bad idea also, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ kpte_clear_flush(kmap_pte-idx, vaddr);
+ kmap_atomic_idx_pop();
+ arch_flush_lazy_mmu_mode();
+@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
+ #endif
+
+ pagefault_enable();
+- preempt_enable();
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(__kunmap_atomic);
+
+diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
+index ada98b39b8ad..585f6829653b 100644
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
+
+ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+ {
++ pte_t pte = pfn_pte(pfn, prot);
+ unsigned long vaddr;
+ int idx, type;
+
+@@ -65,7 +66,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++ WARN_ON(!pte_none(*(kmap_pte - idx)));
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_pte(kmap_pte - idx, pte);
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+@@ -113,6 +119,9 @@ iounmap_atomic(void __iomem *kvaddr)
+ * is a bad idea also, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ kpte_clear_flush(kmap_pte-idx, vaddr);
+ kmap_atomic_idx_pop();
+ }
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index fdb4d42b4ce5..8ab90fbecff0 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -729,9 +729,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
+
+ quiesce_local_uvhub(hmaster);
+
+- spin_lock(&hmaster->queue_lock);
++ raw_spin_lock(&hmaster->queue_lock);
+ reset_with_ipi(&bau_desc->distribution, bcp);
+- spin_unlock(&hmaster->queue_lock);
++ raw_spin_unlock(&hmaster->queue_lock);
+
+ end_uvhub_quiesce(hmaster);
+
+@@ -751,9 +751,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
+
+ quiesce_local_uvhub(hmaster);
+
+- spin_lock(&hmaster->queue_lock);
++ raw_spin_lock(&hmaster->queue_lock);
+ reset_with_ipi(&bau_desc->distribution, bcp);
+- spin_unlock(&hmaster->queue_lock);
++ raw_spin_unlock(&hmaster->queue_lock);
+
+ end_uvhub_quiesce(hmaster);
+
+@@ -774,7 +774,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
+ cycles_t tm1;
+
+ hmaster = bcp->uvhub_master;
+- spin_lock(&hmaster->disable_lock);
++ raw_spin_lock(&hmaster->disable_lock);
+ if (!bcp->baudisabled) {
+ stat->s_bau_disabled++;
+ tm1 = get_cycles();
+@@ -787,7 +787,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
+ }
+ }
+ }
+- spin_unlock(&hmaster->disable_lock);
++ raw_spin_unlock(&hmaster->disable_lock);
+ }
+
+ static void count_max_concurr(int stat, struct bau_control *bcp,
+@@ -850,7 +850,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
+ */
+ static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
+ {
+- spinlock_t *lock = &hmaster->uvhub_lock;
++ raw_spinlock_t *lock = &hmaster->uvhub_lock;
+ atomic_t *v;
+
+ v = &hmaster->active_descriptor_count;
+@@ -983,7 +983,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
+ struct bau_control *hmaster;
+
+ hmaster = bcp->uvhub_master;
+- spin_lock(&hmaster->disable_lock);
++ raw_spin_lock(&hmaster->disable_lock);
+ if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
+ stat->s_bau_reenabled++;
+ for_each_present_cpu(tcpu) {
+@@ -995,10 +995,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
+ tbcp->period_giveups = 0;
+ }
+ }
+- spin_unlock(&hmaster->disable_lock);
++ raw_spin_unlock(&hmaster->disable_lock);
+ return 0;
+ }
+- spin_unlock(&hmaster->disable_lock);
++ raw_spin_unlock(&hmaster->disable_lock);
+ return -1;
+ }
+
+@@ -1916,9 +1916,9 @@ static void __init init_per_cpu_tunables(void)
+ bcp->cong_reps = congested_reps;
+ bcp->disabled_period = sec_2_cycles(disabled_period);
+ bcp->giveup_limit = giveup_limit;
+- spin_lock_init(&bcp->queue_lock);
+- spin_lock_init(&bcp->uvhub_lock);
+- spin_lock_init(&bcp->disable_lock);
++ raw_spin_lock_init(&bcp->queue_lock);
++ raw_spin_lock_init(&bcp->uvhub_lock);
++ raw_spin_lock_init(&bcp->disable_lock);
+ }
+ }
+
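Note: the tlb_uv.c conversion above is the most common mechanical change in the RT series. On PREEMPT_RT, spinlock_t becomes a sleeping rtmutex-based lock, while raw_spinlock_t keeps the mainline spinning semantics; the BAU locks are taken while the hub is quiesced, on paths that must not sleep, so they have to stay raw. The pattern in isolation, as a sketch:

    #include <linux/spinlock.h>

    /* A raw_spinlock_t never sleeps, even on PREEMPT_RT, so it is valid
     * in hard-irq context and inside other raw sections; the price is
     * that the critical section adds directly to worst-case latency,
     * so it must stay short and bounded. */
    static DEFINE_RAW_SPINLOCK(hw_lock);

    static void poke_hardware(void)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&hw_lock, flags);
        /* ... short, bounded register sequence ... */
        raw_spin_unlock_irqrestore(&hw_lock, flags);
    }
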
+diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
+index b333fc45f9ec..8b85916e6986 100644
+--- a/arch/x86/platform/uv/uv_time.c
++++ b/arch/x86/platform/uv/uv_time.c
+@@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
+
+ /* There is one of these allocated per node */
+ struct uv_rtc_timer_head {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ /* next cpu waiting for timer, local node relative: */
+ int next_cpu;
+ /* number of cpus on this node: */
+@@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers(void)
+ uv_rtc_deallocate_timers();
+ return -ENOMEM;
+ }
+- spin_lock_init(&head->lock);
++ raw_spin_lock_init(&head->lock);
+ head->ncpus = uv_blade_nr_possible_cpus(bid);
+ head->next_cpu = -1;
+ blade_info[bid] = head;
+@@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
+ unsigned long flags;
+ int next_cpu;
+
+- spin_lock_irqsave(&head->lock, flags);
++ raw_spin_lock_irqsave(&head->lock, flags);
+
+ next_cpu = head->next_cpu;
+ *t = expires;
+@@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
+ if (uv_setup_intr(cpu, expires)) {
+ *t = ULLONG_MAX;
+ uv_rtc_find_next_timer(head, pnode);
+- spin_unlock_irqrestore(&head->lock, flags);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
+ return -ETIME;
+ }
+ }
+
+- spin_unlock_irqrestore(&head->lock, flags);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
+ return 0;
+ }
+
+@@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
+ unsigned long flags;
+ int rc = 0;
+
+- spin_lock_irqsave(&head->lock, flags);
++ raw_spin_lock_irqsave(&head->lock, flags);
+
+ if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
+ rc = 1;
+@@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
+ uv_rtc_find_next_timer(head, pnode);
+ }
+
+- spin_unlock_irqrestore(&head->lock, flags);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
+
+ return rc;
+ }
+@@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
+ static cycle_t uv_read_rtc(struct clocksource *cs)
+ {
+ unsigned long offset;
++ cycle_t cycles;
+
++ preempt_disable();
+ if (uv_get_min_hub_revision_id() == 1)
+ offset = 0;
+ else
+ offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
+
+- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
++ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
++ preempt_enable();
++
++ return cycles;
+ }
+
+ /*
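Note: the uv_read_rtc() change just above is about CPU affinity rather than locking. The MMR offset is derived from uv_blade_processor_id(), so the computation and the matching uv_read_local_mmr() access must happen on the same CPU, which a fully preemptible kernel no longer guarantees implicitly. Disabling preemption around the pair restores that guarantee; the shape of the fix, with compute_offset_for() and read_mmr() standing in as hypothetical helpers:

    static u64 read_percpu_mmr(void)
    {
        unsigned long offset;
        u64 val;

        preempt_disable();                  /* pin to one CPU ... */
        offset = compute_offset_for(smp_processor_id());
        val = read_mmr(offset);             /* ... so id and access agree */
        preempt_enable();

        return val;
    }
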
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 36c7ac328d8c..caa5fc1be2a2 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
+
+ INIT_LIST_HEAD(&rq->queuelist);
+ INIT_LIST_HEAD(&rq->timeout_list);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
++#endif
+ rq->cpu = -1;
+ rq->q = q;
+ rq->__sector = (sector_t) -1;
+@@ -233,7 +236,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
+ **/
+ void blk_start_queue(struct request_queue *q)
+ {
+- WARN_ON(!irqs_disabled());
++ WARN_ON_NONRT(!irqs_disabled());
+
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ __blk_run_queue(q);
+@@ -659,7 +662,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
+ if (nowait)
+ return -EBUSY;
+
+- ret = wait_event_interruptible(q->mq_freeze_wq,
++ ret = swait_event_interruptible(q->mq_freeze_wq,
+ !atomic_read(&q->mq_freeze_depth) ||
+ blk_queue_dying(q));
+ if (blk_queue_dying(q))
+@@ -679,7 +682,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+ struct request_queue *q =
+ container_of(ref, struct request_queue, q_usage_counter);
+
+- wake_up_all(&q->mq_freeze_wq);
++ swake_up_all(&q->mq_freeze_wq);
+ }
+
+ static void blk_rq_timed_out_timer(unsigned long data)
+@@ -748,7 +751,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ q->bypass_depth = 1;
+ __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+- init_waitqueue_head(&q->mq_freeze_wq);
++ init_swait_queue_head(&q->mq_freeze_wq);
+
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+@@ -3171,7 +3174,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
+ blk_run_queue_async(q);
+ else
+ __blk_run_queue(q);
+- spin_unlock(q->queue_lock);
++ spin_unlock_irq(q->queue_lock);
+ }
+
+ static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
+@@ -3219,7 +3222,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ {
+ struct request_queue *q;
+- unsigned long flags;
+ struct request *rq;
+ LIST_HEAD(list);
+ unsigned int depth;
+@@ -3239,11 +3241,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ q = NULL;
+ depth = 0;
+
+- /*
+- * Save and disable interrupts here, to avoid doing it for every
+- * queue lock we have to take.
+- */
+- local_irq_save(flags);
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
+ list_del_init(&rq->queuelist);
+@@ -3256,7 +3253,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ queue_unplugged(q, depth, from_schedule);
+ q = rq->q;
+ depth = 0;
+- spin_lock(q->queue_lock);
++ spin_lock_irq(q->queue_lock);
+ }
+
+ /*
+@@ -3283,8 +3280,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ */
+ if (q)
+ queue_unplugged(q, depth, from_schedule);
+-
+- local_irq_restore(flags);
+ }
+
+ void blk_finish_plug(struct blk_plug *plug)
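Note: blk-core.c (and blk-mq.c below) swaps the queue-freeze waitqueue for a simple wait queue. The motivation: wake_up_all() on a regular waitqueue takes the waitqueue spinlock, a sleeping lock on RT, yet blk_queue_usage_counter_release() can run from atomic context via the percpu_ref release path. swait heads use a raw spinlock internally and swake_up_all() drops it between wakeups, so the wake side stays atomic-safe with bounded lock hold times. A self-contained sketch of the same freeze/thaw pattern:

    #include <linux/swait.h>
    #include <linux/atomic.h>

    static DECLARE_SWAIT_QUEUE_HEAD(freeze_wq);
    static atomic_t freeze_depth = ATOMIC_INIT(0);

    static void wait_until_thawed(void)
    {
        /* Sleeps until the condition holds; re-checked on each wakeup. */
        swait_event(freeze_wq, atomic_read(&freeze_depth) == 0);
    }

    static void thaw(void)
    {
        /* Safe from contexts where sleeping locks are forbidden on RT. */
        if (atomic_dec_and_test(&freeze_depth))
            swake_up_all(&freeze_wq);
    }
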
+diff --git a/block/blk-ioc.c b/block/blk-ioc.c
+index 381cb50a673c..dc8785233d94 100644
+--- a/block/blk-ioc.c
++++ b/block/blk-ioc.c
+@@ -7,6 +7,7 @@
+ #include <linux/bio.h>
+ #include <linux/blkdev.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+
+ #include "blk.h"
+
+@@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_struct *work)
+ spin_unlock(q->queue_lock);
+ } else {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+- cpu_relax();
++ cpu_chill();
+ spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+ }
+ }
+@@ -187,7 +188,7 @@ void put_io_context_active(struct io_context *ioc)
+ spin_unlock(icq->q->queue_lock);
+ } else {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+- cpu_relax();
++ cpu_chill();
+ goto retry;
+ }
+ }
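Note: both blk-ioc.c loops above poll for another context to release a queue lock. On RT the holder can be a preempted thread, possibly on the same CPU, so spinning with cpu_relax() can livelock. cpu_chill() is the RT-patch answer: it sleeps for roughly a tick so the holder can run (on !RT builds it is expected to fall back to cpu_relax()). The retry shape, with try_make_progress() as a hypothetical stand-in for the real per-icq work:

    static void drain_with_chill(struct io_context *ioc)
    {
        unsigned long flags;

    retry:
        spin_lock_irqsave(&ioc->lock, flags);
        if (!try_make_progress(ioc)) {      /* hypothetical helper */
            spin_unlock_irqrestore(&ioc->lock, flags);
            cpu_chill();                    /* yield instead of spinning */
            goto retry;
        }
        spin_unlock_irqrestore(&ioc->lock, flags);
    }
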
+diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
+index bb3ed488f7b5..628c6c13c482 100644
+--- a/block/blk-mq-cpu.c
++++ b/block/blk-mq-cpu.c
+@@ -16,7 +16,7 @@
+ #include "blk-mq.h"
+
+ static LIST_HEAD(blk_mq_cpu_notify_list);
+-static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
++static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+
+ static int blk_mq_main_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+@@ -25,7 +25,10 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
+ struct blk_mq_cpu_notifier *notify;
+ int ret = NOTIFY_OK;
+
+- raw_spin_lock(&blk_mq_cpu_notify_lock);
++ if (action != CPU_POST_DEAD)
++ return NOTIFY_OK;
++
++ spin_lock(&blk_mq_cpu_notify_lock);
+
+ list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
+ ret = notify->notify(notify->data, action, cpu);
+@@ -33,7 +36,7 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
+ break;
+ }
+
+- raw_spin_unlock(&blk_mq_cpu_notify_lock);
++ spin_unlock(&blk_mq_cpu_notify_lock);
+ return ret;
+ }
+
+@@ -41,16 +44,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+ {
+ BUG_ON(!notifier->notify);
+
+- raw_spin_lock(&blk_mq_cpu_notify_lock);
++ spin_lock(&blk_mq_cpu_notify_lock);
+ list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
+- raw_spin_unlock(&blk_mq_cpu_notify_lock);
++ spin_unlock(&blk_mq_cpu_notify_lock);
+ }
+
+ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+ {
+- raw_spin_lock(&blk_mq_cpu_notify_lock);
++ spin_lock(&blk_mq_cpu_notify_lock);
+ list_del(&notifier->list);
+- raw_spin_unlock(&blk_mq_cpu_notify_lock);
++ spin_unlock(&blk_mq_cpu_notify_lock);
+ }
+
+ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index c207fa9870eb..ac71b0455e9f 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
+
+ static void blk_mq_freeze_queue_wait(struct request_queue *q)
+ {
+- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
++ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
+ }
+
+ /*
+@@ -130,7 +130,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
+ WARN_ON_ONCE(freeze_depth < 0);
+ if (!freeze_depth) {
+ percpu_ref_reinit(&q->q_usage_counter);
+- wake_up_all(&q->mq_freeze_wq);
++ swake_up_all(&q->mq_freeze_wq);
+ }
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+@@ -149,7 +149,7 @@ void blk_mq_wake_waiters(struct request_queue *q)
+ * dying, we need to ensure that processes currently waiting on
+ * the queue are notified as well.
+ */
+- wake_up_all(&q->mq_freeze_wq);
++ swake_up_all(&q->mq_freeze_wq);
+ }
+
+ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
+@@ -197,6 +197,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+ rq->resid_len = 0;
+ rq->sense = NULL;
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
++#endif
+ INIT_LIST_HEAD(&rq->timeout_list);
+ rq->timeout = 0;
+
+@@ -379,6 +382,17 @@ void blk_mq_end_request(struct request *rq, int error)
+ }
+ EXPORT_SYMBOL(blk_mq_end_request);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++void __blk_mq_complete_request_remote_work(struct work_struct *work)
++{
++ struct request *rq = container_of(work, struct request, work);
++
++ rq->q->softirq_done_fn(rq);
++}
++
++#else
++
+ static void __blk_mq_complete_request_remote(void *data)
+ {
+ struct request *rq = data;
+@@ -386,6 +400,8 @@ static void __blk_mq_complete_request_remote(void *data)
+ rq->q->softirq_done_fn(rq);
+ }
+
++#endif
++
+ static void blk_mq_ipi_complete_request(struct request *rq)
+ {
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+@@ -397,19 +413,23 @@ static void blk_mq_ipi_complete_request(struct request *rq)
+ return;
+ }
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+ shared = cpus_share_cache(cpu, ctx->cpu);
+
+ if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ schedule_work_on(ctx->cpu, &rq->work);
++#else
+ rq->csd.func = __blk_mq_complete_request_remote;
+ rq->csd.info = rq;
+ rq->csd.flags = 0;
+ smp_call_function_single_async(ctx->cpu, &rq->csd);
++#endif
+ } else {
+ rq->q->softirq_done_fn(rq);
+ }
+- put_cpu();
++ put_cpu_light();
+ }
+
+ static void __blk_mq_complete_request(struct request *rq)
+@@ -938,14 +958,14 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+ return;
+
+ if (!async) {
+- int cpu = get_cpu();
++ int cpu = get_cpu_light();
+ if (cpumask_test_cpu(cpu, hctx->cpumask)) {
+ __blk_mq_run_hw_queue(hctx);
+- put_cpu();
++ put_cpu_light();
+ return;
+ }
+
+- put_cpu();
++ put_cpu_light();
+ }
+
+ kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+@@ -1667,7 +1687,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
+ {
+ struct blk_mq_hw_ctx *hctx = data;
+
+- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
++ if (action == CPU_POST_DEAD)
+ return blk_mq_hctx_cpu_offline(hctx, cpu);
+
+ /*
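Note: two RT substitutions meet in blk-mq.c. First, get_cpu()/put_cpu() disable preemption outright, turning the enclosed code into an atomic section; get_cpu_light()/put_cpu_light() (RT primitives, roughly migrate_disable() plus smp_processor_id() on RT, plain get_cpu() otherwise) keep the CPU number stable while leaving the section preemptible. Second, completing a request on its submitting CPU via smp_call_function_single_async() would run softirq_done_fn() from hard-irq context, so on RT the completion is bounced to a per-request work item instead, matching the INIT_WORK added in blk_mq_rq_ctx_init(). The work-based path in isolation:

    /* Sketch of the RT remote-completion path. Assumes a work_struct
     * member in struct request, initialized once per request with
     * INIT_WORK(&rq->work, complete_remote_work), as the hunks add. */
    static void complete_remote_work(struct work_struct *work)
    {
        struct request *rq = container_of(work, struct request, work);

        rq->q->softirq_done_fn(rq);     /* kworker context: preemptible */
    }

    static void complete_on_cpu(struct request *rq, int target_cpu)
    {
        /* Queue onto the submitting CPU's workqueue instead of an IPI. */
        schedule_work_on(target_cpu, &rq->work);
    }
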
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 9087b11037b7..0401d76e827c 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -86,12 +86,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+ */
+ static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+ {
+- return __blk_mq_get_ctx(q, get_cpu());
++ return __blk_mq_get_ctx(q, get_cpu_light());
+ }
+
+ static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+ {
+- put_cpu();
++ put_cpu_light();
+ }
+
+ struct blk_mq_alloc_data {
+diff --git a/block/blk-softirq.c b/block/blk-softirq.c
+index 53b1737e978d..81c3c0a62edf 100644
+--- a/block/blk-softirq.c
++++ b/block/blk-softirq.c
+@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ /*
+@@ -93,6 +94,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+ this_cpu_ptr(&blk_cpu_done));
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ return NOTIFY_OK;
+@@ -150,6 +152,7 @@ void __blk_complete_request(struct request *req)
+ goto do_local;
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ /**
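Note: raise_softirq_irqoff() only marks the softirq pending; with interrupts off it cannot immediately get the RT softirq thread onto the CPU. The preempt_check_resched_rt() calls inserted after each local_irq_restore()/local_irq_enable() force the missed reschedule check. Approximately how the RT series defines it (sketch, not verbatim):

    #ifdef CONFIG_PREEMPT_RT_BASE
    # define preempt_check_resched_rt()    preempt_check_resched()
    #else
    # define preempt_check_resched_rt()    barrier()
    #endif
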
+diff --git a/block/bounce.c b/block/bounce.c
+index 1cb5dd3a5da1..2f1ec8a67cbe 100644
+--- a/block/bounce.c
++++ b/block/bounce.c
+@@ -55,11 +55,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
+ unsigned long flags;
+ unsigned char *vto;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ vto = kmap_atomic(to->bv_page);
+ memcpy(vto + to->bv_offset, vfrom, to->bv_len);
+ kunmap_atomic(vto);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ #else /* CONFIG_HIGHMEM */
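Note: bounce_copy_vec() is the first of many users of the _nort interrupt helpers in this patch (the libata, IDE, IPoIB and gameport hunks below follow the same recipe). They encode "interrupts off is only required on mainline": with kmap_atomic preemptible on RT, the irq-off protection is unnecessary there, so the helpers degrade to almost nothing. Approximate definitions, for orientation:

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_disable_nort()      do { } while (0)
    # define local_irq_enable_nort()       do { } while (0)
    # define local_irq_save_nort(flags)    local_save_flags(flags)
    # define local_irq_restore_nort(flags) (void)(flags)
    #else
    # define local_irq_disable_nort()      local_irq_disable()
    # define local_irq_enable_nort()       local_irq_enable()
    # define local_irq_save_nort(flags)    local_irq_save(flags)
    # define local_irq_restore_nort(flags) local_irq_restore(flags)
    #endif
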
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index df939b54b09f..efe5e06adcf7 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -718,13 +718,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+
+ int crypto_register_notifier(struct notifier_block *nb)
+ {
+- return blocking_notifier_chain_register(&crypto_chain, nb);
++ return srcu_notifier_chain_register(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_register_notifier);
+
+ int crypto_unregister_notifier(struct notifier_block *nb)
+ {
+- return blocking_notifier_chain_unregister(&crypto_chain, nb);
++ return srcu_notifier_chain_unregister(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
+
+diff --git a/crypto/api.c b/crypto/api.c
+index bbc147cb5dec..bc1a848f02ec 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list);
+ DECLARE_RWSEM(crypto_alg_sem);
+ EXPORT_SYMBOL_GPL(crypto_alg_sem);
+
+-BLOCKING_NOTIFIER_HEAD(crypto_chain);
++SRCU_NOTIFIER_HEAD(crypto_chain);
+ EXPORT_SYMBOL_GPL(crypto_chain);
+
+ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
+@@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long val, void *v)
+ {
+ int ok;
+
+- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ if (ok == NOTIFY_DONE) {
+ request_module("cryptomgr");
+- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ return ok;
+diff --git a/crypto/internal.h b/crypto/internal.h
+index 7eefcdb00227..0ecc7f5a2f40 100644
+--- a/crypto/internal.h
++++ b/crypto/internal.h
+@@ -47,7 +47,7 @@ struct crypto_larval {
+
+ extern struct list_head crypto_alg_list;
+ extern struct rw_semaphore crypto_alg_sem;
+-extern struct blocking_notifier_head crypto_chain;
++extern struct srcu_notifier_head crypto_chain;
+
+ #ifdef CONFIG_PROC_FS
+ void __init crypto_init_proc(void);
+@@ -146,7 +146,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg)
+
+ static inline void crypto_notify(unsigned long val, void *v)
+ {
+- blocking_notifier_call_chain(&crypto_chain, val, v);
++ srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ #endif /* _CRYPTO_INTERNAL_H */
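Note: the three crypto hunks convert crypto_chain from a blocking notifier, whose call path takes an rw_semaphore, to an SRCU notifier, whose call path is an SRCU read-side section. Registration and unregistration still take the head's mutex and must stay in sleepable context; only the hot call_chain side becomes lock-free for readers, which avoids the rwsem on RT. Usage in the converted form, as a sketch:

    #include <linux/notifier.h>

    SRCU_NOTIFIER_HEAD(my_chain);

    int my_register_listener(struct notifier_block *nb)
    {
        /* Sleepable context only: takes the chain mutex, syncs SRCU. */
        return srcu_notifier_chain_register(&my_chain, nb);
    }

    void my_notify(unsigned long event, void *data)
    {
        /* SRCU read-side section; no rwsem on the call path. */
        srcu_notifier_call_chain(&my_chain, event, data);
    }
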
+diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
+index fded776236e2..bda523219d50 100644
+--- a/drivers/acpi/acpica/acglobal.h
++++ b/drivers/acpi/acpica/acglobal.h
+@@ -116,7 +116,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending);
+ * interrupt level
+ */
+ ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
+-ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
++ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
+ ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
+
+ /* Mutex for _OSI support */
+diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
+index 3b7fb99362b6..696bf8e62afb 100644
+--- a/drivers/acpi/acpica/hwregs.c
++++ b/drivers/acpi/acpica/hwregs.c
+@@ -363,14 +363,14 @@ acpi_status acpi_hw_clear_acpi_status(void)
+ ACPI_BITMASK_ALL_FIXED_STATUS,
+ ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
+
+- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
+
+ /* Clear the fixed events in PM1 A/B */
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITMASK_ALL_FIXED_STATUS);
+
+- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
+
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
+index 98c26ff39409..6e236f2ea791 100644
+--- a/drivers/acpi/acpica/hwxface.c
++++ b/drivers/acpi/acpica/hwxface.c
+@@ -373,7 +373,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
+
+ /*
+ * At this point, we know that the parent register is one of the
+@@ -434,7 +434,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
+
+ unlock_and_exit:
+
+- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
+ return_ACPI_STATUS(status);
+ }
+
+diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
+index 15073375bd00..357e7ca5a587 100644
+--- a/drivers/acpi/acpica/utmutex.c
++++ b/drivers/acpi/acpica/utmutex.c
+@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void)
+ return_ACPI_STATUS (status);
+ }
+
+- status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
++ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
+@@ -145,7 +145,7 @@ void acpi_ut_mutex_terminate(void)
+ /* Delete the spinlocks */
+
+ acpi_os_delete_lock(acpi_gbl_gpe_lock);
+- acpi_os_delete_lock(acpi_gbl_hardware_lock);
++ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
+ acpi_os_delete_lock(acpi_gbl_reference_count_lock);
+
+ /* Delete the reader/writer lock */
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 051b6158d1b7..7ad293bef6ed 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
+ unsigned long flags;
+ unsigned int consumed;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ return consumed;
+ }
+@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
+ unsigned long flags;
+
+ /* FIXME: use a bounce buffer */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ buf = kmap_atomic(page);
+
+ /* do the actual data transfer */
+@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
+ do_write);
+
+ kunmap_atomic(buf);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ } else {
+ buf = page_address(page);
+ ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
+@@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+ unsigned long flags;
+
+ /* FIXME: use bounce buffer */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ buf = kmap_atomic(page);
+
+ /* do the actual data transfer */
+@@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+ count, rw);
+
+ kunmap_atomic(buf);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ } else {
+ buf = page_address(page);
+ consumed = ap->ops->sff_data_xfer(dev, buf + offset,
+diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
+index 4b5cd3a7b2b6..fa8329ad79fd 100644
+--- a/drivers/block/zram/zcomp.c
++++ b/drivers/block/zram/zcomp.c
+@@ -118,12 +118,19 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
+
+ struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
+ {
+- return *get_cpu_ptr(comp->stream);
++ struct zcomp_strm *zstrm;
++
++ zstrm = *this_cpu_ptr(comp->stream);
++ spin_lock(&zstrm->zcomp_lock);
++ return zstrm;
+ }
+
+ void zcomp_stream_put(struct zcomp *comp)
+ {
+- put_cpu_ptr(comp->stream);
++ struct zcomp_strm *zstrm;
++
++ zstrm = *this_cpu_ptr(comp->stream);
++ spin_unlock(&zstrm->zcomp_lock);
+ }
+
+ int zcomp_compress(struct zcomp_strm *zstrm,
+@@ -174,6 +181,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp,
+ pr_err("Can't allocate a compression stream\n");
+ return NOTIFY_BAD;
+ }
++ spin_lock_init(&zstrm->zcomp_lock);
+ *per_cpu_ptr(comp->stream, cpu) = zstrm;
+ break;
+ case CPU_DEAD:
+diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
+index 478cac2ed465..f7a6efdc3285 100644
+--- a/drivers/block/zram/zcomp.h
++++ b/drivers/block/zram/zcomp.h
+@@ -14,6 +14,7 @@ struct zcomp_strm {
+ /* compression/decompression buffer */
+ void *buffer;
+ struct crypto_comp *tfm;
++ spinlock_t zcomp_lock;
+ };
+
+ /* dynamic per-device compression frontend */
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 04365b17ee67..b4a0577a4dbc 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -519,6 +519,8 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
+ goto out_error;
+ }
+
++ zram_meta_init_table_locks(meta, disksize);
++
+ return meta;
+
+ out_error:
+@@ -566,28 +568,28 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle;
+ unsigned int size;
++ struct zcomp_strm *zstrm;
+
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ handle = meta->table[index].handle;
+ size = zram_get_obj_size(meta, index);
+
+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ clear_page(mem);
+ return 0;
+ }
+
++ zstrm = zcomp_stream_get(zram->comp);
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+ if (size == PAGE_SIZE) {
+ copy_page(mem, cmem);
+ } else {
+- struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
+-
+ ret = zcomp_decompress(zstrm, cmem, size, mem);
+- zcomp_stream_put(zram->comp);
+ }
+ zs_unmap_object(meta->mem_pool, handle);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zcomp_stream_put(zram->comp);
++ zram_unlock_table(&meta->table[index]);
+
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret)) {
+@@ -607,14 +609,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+ struct zram_meta *meta = zram->meta;
+ page = bvec->bv_page;
+
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ if (unlikely(!meta->table[index].handle) ||
+ zram_test_flag(meta, index, ZRAM_ZERO)) {
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ handle_zero_page(bvec);
+ return 0;
+ }
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ if (is_partial_io(bvec))
+ /* Use a temporary buffer to decompress the page */
+@@ -691,10 +693,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ if (user_mem)
+ kunmap_atomic(user_mem);
+ /* Free memory associated with this sector now. */
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+ zram_set_flag(meta, index, ZRAM_ZERO);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ atomic64_inc(&zram->stats.zero_pages);
+ ret = 0;
+@@ -785,12 +787,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ * Free memory associated with this sector
+ * before overwriting unused sectors.
+ */
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+
+ meta->table[index].handle = handle;
+ zram_set_obj_size(meta, index, clen);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ /* Update stats */
+ atomic64_add(clen, &zram->stats.compr_data_size);
+@@ -833,9 +835,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
+ }
+
+ while (n >= PAGE_SIZE) {
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ atomic64_inc(&zram->stats.notify_free);
+ index++;
+ n -= PAGE_SIZE;
+@@ -964,9 +966,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
+ zram = bdev->bd_disk->private_data;
+ meta = zram->meta;
+
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ atomic64_inc(&zram->stats.notify_free);
+ }
+
+diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
+index 74fcf10da374..fd4020c99b9e 100644
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -73,6 +73,9 @@ enum zram_pageflags {
+ struct zram_table_entry {
+ unsigned long handle;
+ unsigned long value;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ spinlock_t lock;
++#endif
+ };
+
+ struct zram_stats {
+@@ -120,4 +123,42 @@ struct zram {
+ */
+ bool claim; /* Protected by bdev->bd_mutex */
+ };
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++static inline void zram_lock_table(struct zram_table_entry *table)
++{
++ bit_spin_lock(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_unlock_table(struct zram_table_entry *table)
++{
++ bit_spin_unlock(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
++#else /* CONFIG_PREEMPT_RT_BASE */
++static inline void zram_lock_table(struct zram_table_entry *table)
++{
++ spin_lock(&table->lock);
++ __set_bit(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_unlock_table(struct zram_table_entry *table)
++{
++ __clear_bit(ZRAM_ACCESS, &table->value);
++ spin_unlock(&table->lock);
++}
++
++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
++{
++ size_t num_pages = disksize >> PAGE_SHIFT;
++ size_t index;
++
++ for (index = 0; index < num_pages; index++) {
++ spinlock_t *lock = &meta->table[index].lock;
++ spin_lock_init(lock);
++ }
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ #endif
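Note: zram's per-slot protection is a bit spinlock on ZRAM_ACCESS, a pure busy-wait with no owner that RT cannot convert into a sleeping lock. The header above therefore gives every table entry a real spinlock_t under CONFIG_PREEMPT_RT_BASE and funnels all sites through zram_lock_table()/zram_unlock_table(); the RT variant still sets and clears ZRAM_ACCESS so code inspecting table->value sees the same bit. The zcomp hunks apply the same idea to the per-CPU compression streams, pinning a stream with its own spinlock instead of get_cpu_ptr(). Call sites keep the same shape on both configurations:

    /* Usage sketch, identical on !RT and RT builds: */
    static void discard_slot(struct zram *zram, struct zram_meta *meta, u32 index)
    {
        zram_lock_table(&meta->table[index]);   /* bit lock on !RT, spinlock on RT */
        zram_free_page(zram, index);
        zram_unlock_table(&meta->table[index]);
    }
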
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 3efb3bf0ab83..c894d2e266f3 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1028,8 +1028,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ } sample;
+ long delta, delta2, delta3;
+
+- preempt_disable();
+-
+ sample.jiffies = jiffies;
+ sample.cycles = random_get_entropy();
+ sample.num = num;
+@@ -1070,7 +1068,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ */
+ credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+ }
+- preempt_enable();
+ }
+
+ void add_input_randomness(unsigned int type, unsigned int code,
+@@ -1123,28 +1120,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+ return *(ptr + f->reg_idx++);
+ }
+
+-void add_interrupt_randomness(int irq, int irq_flags)
++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
+ {
+ struct entropy_store *r;
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+- struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ cycles_t cycles = random_get_entropy();
+ __u32 c_high, j_high;
+- __u64 ip;
+ unsigned long seed;
+ int credit = 0;
+
+ if (cycles == 0)
+- cycles = get_reg(fast_pool, regs);
++ cycles = get_reg(fast_pool, NULL);
+ c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
+ j_high = (sizeof(now) > 4) ? now >> 32 : 0;
+ fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
+ fast_pool->pool[1] ^= now ^ c_high;
+- ip = regs ? instruction_pointer(regs) : _RET_IP_;
++ if (!ip)
++ ip = _RET_IP_;
+ fast_pool->pool[2] ^= ip;
+ fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
+- get_reg(fast_pool, regs);
++ get_reg(fast_pool, NULL);
+
+ fast_mix(fast_pool);
+ add_interrupt_bench(cycles);
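Note: with forced interrupt threading on RT, add_interrupt_randomness() no longer runs in the primary (hard-irq) handler, so calling get_irq_regs() inside it is meaningless; the hunk makes the instruction pointer an explicit parameter that callers capture while still in hard-irq context (the vmbus_drv.c hunk below does exactly that). Caller shape, as a sketch:

    /* Sample the regs in the primary handler, where get_irq_regs()
     * is still valid, then hand the ip down. */
    static irqreturn_t my_primary_handler(int irq, void *dev_id)
    {
        struct pt_regs *regs = get_irq_regs();
        u64 ip = regs ? instruction_pointer(regs) : 0;

        add_interrupt_randomness(irq, 0, ip);
        return IRQ_HANDLED;
    }
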
+diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
+index 4da2af9694a2..5b6f57f500b8 100644
+--- a/drivers/clocksource/tcb_clksrc.c
++++ b/drivers/clocksource/tcb_clksrc.c
+@@ -23,8 +23,7 @@
+ * this 32 bit free-running counter. the second channel is not used.
+ *
+ * - The third channel may be used to provide a 16-bit clockevent
+- * source, used in either periodic or oneshot mode. This runs
+- * at 32 KiHZ, and can handle delays of up to two seconds.
++ * source, used in either periodic or oneshot mode.
+ *
+ * A boot clocksource and clockevent source are also currently needed,
+ * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
+@@ -74,6 +73,8 @@ static struct clocksource clksrc = {
+ struct tc_clkevt_device {
+ struct clock_event_device clkevt;
+ struct clk *clk;
++ bool clk_enabled;
++ u32 freq;
+ void __iomem *regs;
+ };
+
+@@ -82,15 +83,26 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
+ return container_of(clkevt, struct tc_clkevt_device, clkevt);
+ }
+
+-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+- * because using one of the divided clocks would usually mean the
+- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+- *
+- * A divided clock could be good for high resolution timers, since
+- * 30.5 usec resolution can seem "low".
+- */
+ static u32 timer_clock;
+
++static void tc_clk_disable(struct clock_event_device *d)
++{
++ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
++
++ clk_disable(tcd->clk);
++ tcd->clk_enabled = false;
++}
++
++static void tc_clk_enable(struct clock_event_device *d)
++{
++ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
++
++ if (tcd->clk_enabled)
++ return;
++ clk_enable(tcd->clk);
++ tcd->clk_enabled = true;
++}
++
+ static int tc_shutdown(struct clock_event_device *d)
+ {
+ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+@@ -98,8 +110,14 @@ static int tc_shutdown(struct clock_event_device *d)
+
+ __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
+ __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
++ return 0;
++}
++
++static int tc_shutdown_clk_off(struct clock_event_device *d)
++{
++ tc_shutdown(d);
+ if (!clockevent_state_detached(d))
+- clk_disable(tcd->clk);
++ tc_clk_disable(d);
+
+ return 0;
+ }
+@@ -112,9 +130,9 @@ static int tc_set_oneshot(struct clock_event_device *d)
+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+ tc_shutdown(d);
+
+- clk_enable(tcd->clk);
++ tc_clk_enable(d);
+
+- /* slow clock, count up to RC, then irq and stop */
++ /* count up to RC, then irq and stop */
+ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
+ ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
+ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+@@ -134,12 +152,12 @@ static int tc_set_periodic(struct clock_event_device *d)
+ /* By not making the gentime core emulate periodic mode on top
+ * of oneshot, we get lower overhead and improved accuracy.
+ */
+- clk_enable(tcd->clk);
++ tc_clk_enable(d);
+
+- /* slow clock, count up to RC, then irq and restart */
++ /* count up to RC, then irq and restart */
+ __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+- __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
++ __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+
+ /* Enable clock and interrupts on RC compare */
+ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+@@ -166,9 +184,13 @@ static struct tc_clkevt_device clkevt = {
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ /* Should be lower than at91rm9200's system timer */
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ .rating = 125,
++#else
++ .rating = 200,
++#endif
+ .set_next_event = tc_next_event,
+- .set_state_shutdown = tc_shutdown,
++ .set_state_shutdown = tc_shutdown_clk_off,
+ .set_state_periodic = tc_set_periodic,
+ .set_state_oneshot = tc_set_oneshot,
+ },
+@@ -188,8 +210,9 @@ static irqreturn_t ch2_irq(int irq, void *handle)
+ return IRQ_NONE;
+ }
+
+-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
++static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
+ {
++ unsigned divisor = atmel_tc_divisors[divisor_idx];
+ int ret;
+ struct clk *t2_clk = tc->clk[2];
+ int irq = tc->irq[2];
+@@ -210,7 +233,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+ clkevt.regs = tc->regs;
+ clkevt.clk = t2_clk;
+
+- timer_clock = clk32k_divisor_idx;
++ timer_clock = divisor_idx;
++ if (!divisor)
++ clkevt.freq = 32768;
++ else
++ clkevt.freq = clk_get_rate(t2_clk) / divisor;
+
+ clkevt.clkevt.cpumask = cpumask_of(0);
+
+@@ -221,7 +248,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+ return ret;
+ }
+
+- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
++ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
+
+ return ret;
+ }
+@@ -358,7 +385,11 @@ static int __init tcb_clksrc_init(void)
+ goto err_disable_t1;
+
+ /* channel 2: periodic and oneshot timer support */
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ ret = setup_clkevents(tc, clk32k_divisor_idx);
++#else
++ ret = setup_clkevents(tc, best_divisor_idx);
++#endif
+ if (ret)
+ goto err_unregister_clksrc;
+
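Note: the TC clockevent hunk trades power for resolution. Instead of hardwiring the 32768 Hz slow clock, the event channel can run from a divided master clock (selected when CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK is off), which matters for high-resolution timers on RT; the rating rises from 125 to 200 accordingly. The periodic reload generalizes from (32768 + HZ/2)/HZ to (tcd->freq + HZ/2)/HZ, i.e. freq/HZ rounded to nearest. Back-of-envelope numbers, assuming a hypothetical 132 MHz master clock and a /32 divisor (board-dependent assumptions, not taken from this patch):

    /* All numbers illustrative. */
    unsigned int freq = 132000000 / 32;          /* 4.125 MHz event clock */
    unsigned int rc   = (freq + 100 / 2) / 100;  /* HZ=100: RC = 41250, fits 16 bits */
    /* one-shot granularity: ~242 ns per count, vs ~30.5 us at 32 kHz */
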
+diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
+index 7f0f5b26d8c5..1553f19e73e7 100644
+--- a/drivers/clocksource/timer-atmel-pit.c
++++ b/drivers/clocksource/timer-atmel-pit.c
+@@ -46,6 +46,7 @@ struct pit_data {
+ u32 cycle;
+ u32 cnt;
+ unsigned int irq;
++ bool irq_requested;
+ struct clk *mck;
+ };
+
+@@ -96,15 +97,29 @@ static int pit_clkevt_shutdown(struct clock_event_device *dev)
+
+ /* disable irq, leaving the clocksource active */
+ pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
++ if (data->irq_requested) {
++ free_irq(data->irq, data);
++ data->irq_requested = false;
++ }
+ return 0;
+ }
+
++static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id);
+ /*
+ * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
+ */
+ static int pit_clkevt_set_periodic(struct clock_event_device *dev)
+ {
+ struct pit_data *data = clkevt_to_pit_data(dev);
++ int ret;
++
++ ret = request_irq(data->irq, at91sam926x_pit_interrupt,
++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
++ "at91_tick", data);
++ if (ret)
++ panic(pr_fmt("Unable to setup IRQ\n"));
++
++ data->irq_requested = true;
+
+ /* update clocksource counter */
+ data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
+@@ -211,15 +226,6 @@ static int __init at91sam926x_pit_common_init(struct pit_data *data)
+ return ret;
+ }
+
+- /* Set up irq handler */
+- ret = request_irq(data->irq, at91sam926x_pit_interrupt,
+- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
+- "at91_tick", data);
+- if (ret) {
+- pr_err("Unable to setup IRQ\n");
+- return ret;
+- }
+-
+ /* Set up and register clockevents */
+ data->clkevt.name = "pit";
+ data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC;
+diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
+index e90ab5b63a90..9e124087c55f 100644
+--- a/drivers/clocksource/timer-atmel-st.c
++++ b/drivers/clocksource/timer-atmel-st.c
+@@ -115,18 +115,29 @@ static void clkdev32k_disable_and_flush_irq(void)
+ last_crtr = read_CRTR();
+ }
+
++static int atmel_st_irq;
++
+ static int clkevt32k_shutdown(struct clock_event_device *evt)
+ {
+ clkdev32k_disable_and_flush_irq();
+ irqmask = 0;
+ regmap_write(regmap_st, AT91_ST_IER, irqmask);
++ free_irq(atmel_st_irq, regmap_st);
+ return 0;
+ }
+
+ static int clkevt32k_set_oneshot(struct clock_event_device *dev)
+ {
++ int ret;
++
+ clkdev32k_disable_and_flush_irq();
+
++ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
++ "at91_tick", regmap_st);
++ if (ret)
++ panic(pr_fmt("Unable to setup IRQ\n"));
++
+ /*
+ * ALM for oneshot irqs, set by next_event()
+ * before 32 seconds have passed.
+@@ -139,8 +150,16 @@ static int clkevt32k_set_oneshot(struct clock_event_device *dev)
+
+ static int clkevt32k_set_periodic(struct clock_event_device *dev)
+ {
++ int ret;
++
+ clkdev32k_disable_and_flush_irq();
+
++ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
++ "at91_tick", regmap_st);
++ if (ret)
++ panic(pr_fmt("Unable to setup IRQ\n"));
++
+ /* PIT for periodic irqs; fixed rate of 1/HZ */
+ irqmask = AT91_ST_PITS;
+ regmap_write(regmap_st, AT91_ST_PIMR, timer_latch);
+@@ -198,7 +217,7 @@ static int __init atmel_st_timer_init(struct device_node *node)
+ {
+ struct clk *sclk;
+ unsigned int sclk_rate, val;
+- int irq, ret;
++ int ret;
+
+ regmap_st = syscon_node_to_regmap(node);
+ if (IS_ERR(regmap_st)) {
+@@ -212,21 +231,12 @@ static int __init atmel_st_timer_init(struct device_node *node)
+ regmap_read(regmap_st, AT91_ST_SR, &val);
+
+ /* Get the interrupts property */
+- irq = irq_of_parse_and_map(node, 0);
+- if (!irq) {
++ atmel_st_irq = irq_of_parse_and_map(node, 0);
++ if (!atmel_st_irq) {
+ pr_err("Unable to get IRQ from DT\n");
+ return -EINVAL;
+ }
+
+- /* Make IRQs happen for the system timer */
+- ret = request_irq(irq, at91rm9200_timer_interrupt,
+- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
+- "at91_tick", regmap_st);
+- if (ret) {
+- pr_err("Unable to setup IRQ\n");
+- return ret;
+- }
+-
+ sclk = of_clk_get(node, 0);
+ if (IS_ERR(sclk)) {
+ pr_err("Unable to get slow clock\n");
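Note: both Atmel timer drivers stop requesting their tick IRQ at probe time and instead request it in the set-periodic/set-oneshot callbacks, freeing it again on shutdown, so the shared IRQF_TIMER handler is only wired up while the clockevent is actually in use; that reasoning is inferred from the RT series rather than stated in the hunks. panic() on failure is deliberate: a tick source without its interrupt is unrecoverable. The shared lifecycle, reduced to a skeleton (tick_irq, tick_handler, stop_hw() and start_periodic() are hypothetical stand-ins):

    static int tick_irq;                             /* assumed mapped at probe */
    static irqreturn_t tick_handler(int irq, void *dev_id);
    static bool tick_irq_requested;

    static int my_shutdown(struct clock_event_device *dev)
    {
        stop_hw(dev);                                /* hypothetical */
        if (tick_irq_requested) {
            free_irq(tick_irq, dev);
            tick_irq_requested = false;
        }
        return 0;
    }

    static int my_set_periodic(struct clock_event_device *dev)
    {
        if (request_irq(tick_irq, tick_handler,
                        IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
                        "at91_tick", dev))
            panic("Unable to setup IRQ\n");
        tick_irq_requested = true;
        start_periodic(dev);                         /* hypothetical */
        return 0;
    }
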
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index a782ce87715c..19d265948526 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -32,6 +32,7 @@
+ #include <linux/pid_namespace.h>
+
+ #include <linux/cn_proc.h>
++#include <linux/locallock.h>
+
+ /*
+ * Size of a cn_msg followed by a proc_event structure. Since the
+@@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
+
+ /* proc_event_counts is used as the sequence number of the netlink message */
+ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
++static DEFINE_LOCAL_IRQ_LOCK(send_msg_lock);
+
+ static inline void send_msg(struct cn_msg *msg)
+ {
+- preempt_disable();
++ local_lock(send_msg_lock);
+
+ msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
+ ((struct proc_event *)msg->data)->cpu = smp_processor_id();
+@@ -70,7 +72,7 @@ static inline void send_msg(struct cn_msg *msg)
+ */
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
+
+- preempt_enable();
++ local_unlock(send_msg_lock);
+ }
+
+ void proc_fork_connector(struct task_struct *task)
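Note: send_msg() previously used preempt_disable() purely as a per-CPU lock around proc_event_counts. DEFINE_LOCAL_IRQ_LOCK()/local_lock() is the RT replacement for that idiom: on mainline it compiles down to preempt_disable() (and local_lock_irq() to local_irq_disable()), while on RT it takes a per-CPU sleeping lock, so the per-CPU data stays serialized and the section remains preemptible. intel_sprite.c below uses the same construct as pipe_update_lock. Minimal usage sketch:

    #include <linux/percpu.h>
    #include <linux/locallock.h>    /* header added by the RT series */

    static DEFINE_PER_CPU(u32, event_seq);
    static DEFINE_LOCAL_IRQ_LOCK(seq_lock);

    static u32 next_seq(void)
    {
        u32 seq;

        local_lock(seq_lock);       /* !RT: preempt_disable(); RT: per-CPU lock */
        seq = __this_cpu_inc_return(event_seq);
        local_unlock(seq_lock);

        return seq;
    }
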
+diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
+index adbd1de1cea5..1fac5074f2cf 100644
+--- a/drivers/cpufreq/Kconfig.x86
++++ b/drivers/cpufreq/Kconfig.x86
+@@ -124,7 +124,7 @@ config X86_POWERNOW_K7_ACPI
+
+ config X86_POWERNOW_K8
+ tristate "AMD Opteron/Athlon64 PowerNow!"
+- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
++ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
+ help
+ This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+ Support for K10 and newer processors is now in acpi-cpufreq.
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index b35e5b6475b2..ce60807fb1d4 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1302,7 +1302,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
+ if (ret)
+ return ret;
+
++#ifndef CONFIG_PREEMPT_RT_BASE
+ trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
++#endif
+
+ i915_gem_execbuffer_move_to_active(vmas, params->request);
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+index 6f10b421487b..dd3a9a6ace11 100644
+--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+@@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+ if (!mutex_is_locked(mutex))
+ return false;
+
+-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
++#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE)
+ return mutex->owner == task;
+ #else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 1c2aec392412..1d85c0c791f1 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -812,6 +812,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
++ preempt_disable_rt();
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+@@ -863,6 +864,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
++ preempt_enable_rt();
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index e9a64fba6333..2aac27b13d86 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -11647,7 +11647,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_flip_work *work;
+
+- WARN_ON(!in_interrupt());
++ WARN_ON_NONRT(!in_interrupt());
+
+ if (crtc == NULL)
+ return;
+diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
+index 4178849631ad..0eb939c92544 100644
+--- a/drivers/gpu/drm/i915/intel_sprite.c
++++ b/drivers/gpu/drm/i915/intel_sprite.c
+@@ -38,6 +38,7 @@
+ #include "intel_drv.h"
+ #include <drm/i915_drm.h>
+ #include "i915_drv.h"
++#include <linux/locallock.h>
+
+ static bool
+ format_is_yuv(uint32_t format)
+@@ -64,6 +65,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ 1000 * adjusted_mode->crtc_htotal);
+ }
+
++static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock);
++
+ /**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @crtc: the crtc of which the registers are going to be updated
+@@ -94,7 +97,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
+ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
+ max = vblank_start - 1;
+
+- local_irq_disable();
++ local_lock_irq(pipe_update_lock);
+
+ if (min <= 0 || max <= 0)
+ return;
+@@ -124,11 +127,11 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
+ break;
+ }
+
+- local_irq_enable();
++ local_unlock_irq(pipe_update_lock);
+
+ timeout = schedule_timeout(timeout);
+
+- local_irq_disable();
++ local_lock_irq(pipe_update_lock);
+ }
+
+ finish_wait(wq, &wait);
+@@ -180,7 +183,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
+ crtc->base.state->event = NULL;
+ }
+
+- local_irq_enable();
++ local_unlock_irq(pipe_update_lock);
+
+ if (crtc->debug.start_vbl_count &&
+ crtc->debug.start_vbl_count != end_vbl_count) {
+diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+index 283d2841ba58..d01f6ed1977e 100644
+--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
++++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+@@ -23,7 +23,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+ if (!mutex_is_locked(mutex))
+ return false;
+
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
++#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
+ return mutex->owner == task;
+ #else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index c3206fb8f4cf..6e2423186e2a 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1869,6 +1869,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
++ preempt_disable_rt();
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+@@ -1961,6 +1962,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
++ preempt_enable_rt();
+
+ /* Decode into vertical and horizontal scanout position. */
+ *vpos = position & 0x1fff;
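Note: the i915 and radeon scanout hunks fill in the sites mainline already annotates with "preempt_disable_rt() should go right here": the display register reads and the surrounding ktime_get() calls must form one short, uninterrupted window or the computed vblank timestamp drifts. These helpers are the mirror image of the _nort family, real on RT and empty otherwise. Approximate RT-patch definitions (sketch):

    #ifdef CONFIG_PREEMPT_RT_BASE
    # define preempt_disable_rt()  preempt_disable()
    # define preempt_enable_rt()   preempt_enable()
    #else
    # define preempt_disable_rt()  barrier()
    # define preempt_enable_rt()   barrier()
    #endif
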
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index e82f7e1c217c..b57d917b6ab7 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -761,6 +761,8 @@ static void vmbus_isr(void)
+ void *page_addr;
+ struct hv_message *msg;
+ union hv_synic_event_flags *event;
++ struct pt_regs *regs = get_irq_regs();
++ u64 ip = regs ? instruction_pointer(regs) : 0;
+ bool handled = false;
+
+ page_addr = hv_context.synic_event_page[cpu];
+@@ -808,7 +810,7 @@ static void vmbus_isr(void)
+ tasklet_schedule(hv_context.msg_dpc[cpu]);
+ }
+
+- add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
++ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip);
+ }
+
+
+diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
+index 36f76e28a0bf..394f142f90c7 100644
+--- a/drivers/ide/alim15x3.c
++++ b/drivers/ide/alim15x3.c
+@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
+
+ isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ if (m5229_revision < 0xC2) {
+ /*
+@@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
+ }
+ pci_dev_put(north);
+ pci_dev_put(isa_dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return 0;
+ }
+
+diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
+index 0ceae5cbd89a..c212e85d7f3e 100644
+--- a/drivers/ide/hpt366.c
++++ b/drivers/ide/hpt366.c
+@@ -1236,7 +1236,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
+
+ dma_old = inb(base + 2);
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ dma_new = dma_old;
+ pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
+@@ -1247,7 +1247,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
+ if (dma_new != dma_old)
+ outb(dma_new, base + 2);
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
+ hwif->name, base, base + 7);
+diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
+index 19763977568c..4169433faab5 100644
+--- a/drivers/ide/ide-io-std.c
++++ b/drivers/ide/ide-io-std.c
+@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
+ insl(data_addr, buf, words);
+
+ if ((io_32bit & 2) && !mmio)
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ if (((len + 1) & 3) < 2)
+ return;
+@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
+ outsl(data_addr, buf, words);
+
+ if ((io_32bit & 2) && !mmio)
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ if (((len + 1) & 3) < 2)
+ return;
+diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
+index 669ea1e45795..e12e43e62245 100644
+--- a/drivers/ide/ide-io.c
++++ b/drivers/ide/ide-io.c
+@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long data)
+ /* disable_irq_nosync ?? */
+ disable_irq(hwif->irq);
+ /* local CPU only, as if we were handling an interrupt */
+- local_irq_disable();
++ local_irq_disable_nort();
+ if (hwif->polling) {
+ startstop = handler(drive);
+ } else if (drive_is_ready(drive)) {
+diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
+index 376f2dc410c5..f014dd1b73dc 100644
+--- a/drivers/ide/ide-iops.c
++++ b/drivers/ide/ide-iops.c
+@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
+ if ((stat & ATA_BUSY) == 0)
+ break;
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ *rstat = stat;
+ return -EBUSY;
+ }
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ /*
+ * Allow status to settle, then read it again.
+diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
+index 0b63facd1d87..4ceba37afc0c 100644
+--- a/drivers/ide/ide-probe.c
++++ b/drivers/ide/ide-probe.c
+@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
+ int bswap = 1;
+
+ /* local CPU only; some systems need this */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ /* read 512 bytes of id info */
+ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ drive->dev_flags |= IDE_DFLAG_ID_READ;
+ #ifdef DEBUG
+diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
+index a716693417a3..be0568c722d6 100644
+--- a/drivers/ide/ide-taskfile.c
++++ b/drivers/ide/ide-taskfile.c
+@@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
+
+ page_is_high = PageHighMem(page);
+ if (page_is_high)
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ buf = kmap_atomic(page) + offset;
+
+@@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
+ kunmap_atomic(buf);
+
+ if (page_is_high)
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ len -= nr_bytes;
+ }
+@@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
+- local_irq_disable();
++ local_irq_disable_nort();
+
+ ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
+
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index d3394b6add24..506bfba6ec9f 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -897,7 +897,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
+
+ ipoib_dbg_mcast(priv, "restarting multicast task\n");
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ netif_addr_lock(dev);
+ spin_lock(&priv->lock);
+
+@@ -979,7 +979,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
+
+ spin_unlock(&priv->lock);
+ netif_addr_unlock(dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ /*
+ * make sure the in-flight joins have finished before we attempt
+diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
+index 4a2a9e370be7..e970d9afd179 100644
+--- a/drivers/input/gameport/gameport.c
++++ b/drivers/input/gameport/gameport.c
+@@ -91,13 +91,13 @@ static int gameport_measure_speed(struct gameport *gameport)
+ tx = ~0;
+
+ for (i = 0; i < 50; i++) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ t1 = ktime_get_ns();
+ for (t = 0; t < 50; t++)
+ gameport_read(gameport);
+ t2 = ktime_get_ns();
+ t3 = ktime_get_ns();
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ t = (t2 - t1) - (t3 - t2);
+ if (t < tx)
+@@ -124,12 +124,12 @@ static int old_gameport_measure_speed(struct gameport *gameport)
+ tx = 1 << 30;
+
+ for(i = 0; i < 50; i++) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ GET_TIME(t1);
+ for (t = 0; t < 50; t++) gameport_read(gameport);
+ GET_TIME(t2);
+ GET_TIME(t3);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
+ }
+@@ -148,11 +148,11 @@ static int old_gameport_measure_speed(struct gameport *gameport)
+ tx = 1 << 30;
+
+ for(i = 0; i < 50; i++) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ t1 = rdtsc();
+ for (t = 0; t < 50; t++) gameport_read(gameport);
+ t2 = rdtsc();
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ if (t2 - t1 < tx) tx = t2 - t1;
+ }
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 96de97a46079..a6ec875d941b 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1832,10 +1832,10 @@ static int __attach_device(struct iommu_dev_data *dev_data,
+ int ret;
+
+ /*
+- * Must be called with IRQs disabled. Warn here to detect early
+- * when its not.
++ * Must be called with IRQs disabled on a non RT kernel. Warn here to
++ * detect early when its not.
+ */
+- WARN_ON(!irqs_disabled());
++ WARN_ON_NONRT(!irqs_disabled());
+
+ /* lock domain */
+ spin_lock(&domain->lock);
+@@ -2003,10 +2003,10 @@ static void __detach_device(struct iommu_dev_data *dev_data)
+ struct protection_domain *domain;
+
+ /*
+- * Must be called with IRQs disabled. Warn here to detect early
+- * when its not.
++ * Must be called with IRQs disabled on a non RT kernel. Warn here to
++ * detect early when its not.
+ */
+- WARN_ON(!irqs_disabled());
++ WARN_ON_NONRT(!irqs_disabled());
+
+ if (WARN_ON(!dev_data->domain))
+ return;
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index ebb5bf3ddbd9..598f5df45f6b 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -479,7 +479,7 @@ struct deferred_flush_data {
+ struct deferred_flush_table *tables;
+ };
+
+-DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
++static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
+
+ /* bitmap for indexing intel_iommus */
+ static int g_num_of_iommus;
+@@ -3626,10 +3626,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
+ struct intel_iommu *iommu;
+ struct deferred_flush_entry *entry;
+ struct deferred_flush_data *flush_data;
+- unsigned int cpuid;
+
+- cpuid = get_cpu();
+- flush_data = per_cpu_ptr(&deferred_flush, cpuid);
++ flush_data = raw_cpu_ptr(&deferred_flush);
+
+ /* Flush all CPUs' entries to avoid deferring too much. If
+ * this becomes a bottleneck, can just flush us, and rely on
+@@ -3662,8 +3660,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
+ }
+ flush_data->size++;
+ spin_unlock_irqrestore(&flush_data->lock, flags);
+-
+- put_cpu();
+ }
+
+ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
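The intel-iommu hunks drop get_cpu()/put_cpu() because the per-CPU deferred_flush_data carries its own spinlock; pinning the task to a CPU was never the real protection. raw_cpu_ptr() merely picks the current CPU's instance, and the lock keeps that instance consistent even if the task migrates afterwards. The pattern, roughly:

    struct deferred_flush_data *flush_data;
    unsigned long flags;

    flush_data = raw_cpu_ptr(&deferred_flush);    /* no preempt_disable() */
    spin_lock_irqsave(&flush_data->lock, flags);  /* the lock, not CPU pinning, is the guarantee */
    /* ... append the IOVA range for deferred flushing ... */
    spin_unlock_irqrestore(&flush_data->lock, flags);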
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index e23001bfcfee..359d5d169ec0 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -22,6 +22,7 @@
+ #include <linux/slab.h>
+ #include <linux/smp.h>
+ #include <linux/bitops.h>
++#include <linux/cpu.h>
+
+ static bool iova_rcache_insert(struct iova_domain *iovad,
+ unsigned long pfn,
+@@ -420,10 +421,8 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
+
+ /* Try replenishing IOVAs by flushing rcache. */
+ flushed_rcache = true;
+- preempt_disable();
+ for_each_online_cpu(cpu)
+ free_cpu_cached_iovas(cpu, iovad);
+- preempt_enable();
+ goto retry;
+ }
+
+@@ -751,7 +750,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
+ bool can_insert = false;
+ unsigned long flags;
+
+- cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
++ cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ if (!iova_magazine_full(cpu_rcache->loaded)) {
+@@ -781,7 +780,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
+ iova_magazine_push(cpu_rcache->loaded, iova_pfn);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+- put_cpu_ptr(rcache->cpu_rcaches);
+
+ if (mag_to_free) {
+ iova_magazine_free_pfns(mag_to_free, iovad);
+@@ -815,7 +813,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
+ bool has_pfn = false;
+ unsigned long flags;
+
+- cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
++ cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ if (!iova_magazine_empty(cpu_rcache->loaded)) {
+@@ -837,7 +835,6 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
+ iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+- put_cpu_ptr(rcache->cpu_rcaches);
+
+ return iova_pfn;
+ }
+diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
+index 3f9ddb9fafa7..09da5b6b44a1 100644
+--- a/drivers/leds/trigger/Kconfig
++++ b/drivers/leds/trigger/Kconfig
+@@ -69,7 +69,7 @@ config LEDS_TRIGGER_BACKLIGHT
+
+ config LEDS_TRIGGER_CPU
+ bool "LED CPU Trigger"
+- depends on LEDS_TRIGGERS
++ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE
+ help
+ This allows LEDs to be controlled by active CPUs. This shows
+ the active CPUs across an array of LEDs so you can see which
+diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
+index 4d200883c505..98b64ed5cb81 100644
+--- a/drivers/md/bcache/Kconfig
++++ b/drivers/md/bcache/Kconfig
+@@ -1,6 +1,7 @@
+
+ config BCACHE
+ tristate "Block device as cache"
++ depends on !PREEMPT_RT_FULL
+ ---help---
+ Allows a block device to be used as cache for other devices; uses
+ a btree for indexing and the layout is optimized for SSDs.
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 5da86c8b6545..2aa092f2977e 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -811,7 +811,7 @@ static void dm_old_request_fn(struct request_queue *q)
+ /* Establish tio->ti before queuing work (map_tio_request) */
+ tio->ti = ti;
+ queue_kthread_work(&md->kworker, &tio->work);
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ }
+ }
+
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index ee7fc3701700..ae59c9e13911 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1928,8 +1928,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+ struct raid5_percpu *percpu;
+ unsigned long cpu;
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ percpu = per_cpu_ptr(conf->percpu, cpu);
++ spin_lock(&percpu->lock);
+ if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
+ ops_run_biofill(sh);
+ overlap_clear++;
+@@ -1985,7 +1986,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+ if (test_and_clear_bit(R5_Overlap, &dev->flags))
+ wake_up(&sh->raid_conf->wait_for_overlap);
+ }
+- put_cpu();
++ spin_unlock(&percpu->lock);
++ put_cpu_light();
+ }
+
+ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
+@@ -6438,6 +6440,7 @@ static int raid5_alloc_percpu(struct r5conf *conf)
+ __func__, cpu);
+ break;
+ }
++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
+ }
+ put_online_cpus();
+
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index 517d4b68a1be..efe91887ecd7 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -504,6 +504,7 @@ struct r5conf {
+ int recovery_disabled;
+ /* per cpu variables */
+ struct raid5_percpu {
++ spinlock_t lock; /* Protection for -RT */
+ struct page *spare_page; /* Used when checking P/Q in raid6 */
+ struct flex_array *scribble; /* space for constructing buffer
+ * lists and performing address
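raid5 gets the matching treatment: get_cpu() becomes get_cpu_light(), and the new per-CPU spinlock serializes access to spare_page and scribble because preemption now stays enabled on RT. A sketch of the light variants, assuming the definitions this patch adds elsewhere:

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define get_cpu_light()   ({ migrate_disable(); smp_processor_id(); })
    # define put_cpu_light()   migrate_enable()
    #else
    # define get_cpu_light()   get_cpu()
    # define put_cpu_light()   put_cpu()
    #endif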
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index d00252828966..9faab404faac 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -54,6 +54,7 @@ config AD525X_DPOT_SPI
+ config ATMEL_TCLIB
+ bool "Atmel AT32/AT91 Timer/Counter Library"
+ depends on (AVR32 || ARCH_AT91)
++ default y if PREEMPT_RT_FULL
+ help
+ Select this if you want a library to allocate the Timer/Counter
+ blocks found on many Atmel processors. This facilitates using
+@@ -69,8 +70,7 @@ config ATMEL_TCB_CLKSRC
+ are combined to make a single 32-bit timer.
+
+ When GENERIC_CLOCKEVENTS is defined, the third timer channel
+- may be used as a clock event device supporting oneshot mode
+- (delays of up to two seconds) based on the 32 KiHz clock.
++ may be used as a clock event device supporting oneshot mode.
+
+ config ATMEL_TCB_CLKSRC_BLOCK
+ int
+@@ -84,6 +84,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
+ TC can be used for other purposes, such as PWM generation and
+ interval timing.
+
++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
++ bool "TC Block use 32 KiHz clock"
++ depends on ATMEL_TCB_CLKSRC
++ default y if !PREEMPT_RT_FULL
++ help
++ Select this to use 32 KiHz base clock rate as TC block clock
++ source for clock events.
++
++
+ config DUMMY_IRQ
+ tristate "Dummy IRQ handler"
+ default n
+@@ -114,6 +123,35 @@ config IBM_ASM
+ for information on the specific driver level and support statement
+ for your IBM server.
+
++config HWLAT_DETECTOR
++ tristate "Testing module to detect hardware-induced latencies"
++ depends on DEBUG_FS
++ depends on RING_BUFFER
++ default m
++ ---help---
++ A simple hardware latency detector. Use this module to detect
++ large latencies introduced by the behavior of the underlying
++ system firmware external to Linux. We do this using periodic
++ use of stop_machine to grab all available CPUs and measure
++ for unexplainable gaps in the CPU timestamp counter(s). By
++ default, the module is not enabled until the "enable" file
++ within the "hwlat_detector" debugfs directory is toggled.
++
++ This module is often used to detect SMI (System Management
++ Interrupts) on x86 systems, though is not x86 specific. To
++ this end, we default to using a sample window of 1 second,
++ during which we will sample for 0.5 seconds. If an SMI or
++ similar event occurs during that time, it is recorded
++ into an 8K-sample global ring buffer until retrieved.
++
++ WARNING: This software should never be enabled (it can be built
++ but should not be turned on after it is loaded) in a production
++ environment where high latencies are a concern since the
++ sampling mechanism actually introduces latencies for
++ regular tasks while the CPU(s) are being held.
++
++ If unsure, say N.
++
+ config PHANTOM
+ tristate "Sensable PHANToM (PCI)"
+ depends on PCI
+diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
+index fb32516ddfe2..8643df9af3c4 100644
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -38,6 +38,7 @@ obj-$(CONFIG_C2PORT) += c2port/
+ obj-$(CONFIG_HMC6352) += hmc6352.o
+ obj-y += eeprom/
+ obj-y += cb710/
++obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
+ obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
+ obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
+ obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
+diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c
+new file mode 100644
+index 000000000000..52f5ad5fd9c0
+--- /dev/null
++++ b/drivers/misc/hwlat_detector.c
+@@ -0,0 +1,1240 @@
++/*
++ * hwlat_detector.c - A simple Hardware Latency detector.
++ *
++ * Use this module to detect large system latencies induced by the behavior of
++ * certain underlying system hardware or firmware, independent of Linux itself.
++ * The code was developed originally to detect the presence of SMIs on Intel
++ * and AMD systems, although there is no dependency upon x86 herein.
++ *
++ * The classical example usage of this module is in detecting the presence of
++ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
++ * somewhat special form of hardware interrupt spawned from earlier CPU debug
++ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
++ * LPC (or other device) to generate a special interrupt under certain
++ * circumstances, for example, upon expiration of a special SMI timer device,
++ * due to certain external thermal readings, on certain I/O address accesses,
++ * and other situations. An SMI hits a special CPU pin, triggers a special
++ * SMI mode (complete with special memory map), and the OS is unaware.
++ *
++ * Although certain hardware-inducing latencies are necessary (for example,
++ * a modern system often requires an SMI handler for correct thermal control
++ * and remote management) they can wreak havoc upon any OS-level performance
++ * guarantees toward low-latency, especially when the OS is not even made
++ * aware of the presence of these interrupts. For this reason, we need a
++ * somewhat brute force mechanism to detect these interrupts. In this case,
++ * we do it by hogging all of the CPU(s) for configurable timer intervals,
++ * sampling the built-in CPU timer, looking for discontiguous readings.
++ *
++ * WARNING: This implementation necessarily introduces latencies. Therefore,
++ * you should NEVER use this module in a production environment
++ * requiring any kind of low-latency performance guarantee(s).
++ *
++ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
++ *
++ * Includes useful feedback from Clark Williams <clark@redhat.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/ring_buffer.h>
++#include <linux/time.h>
++#include <linux/hrtimer.h>
++#include <linux/kthread.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/uaccess.h>
++#include <linux/version.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/trace_clock.h>
++
++#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
++#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
++#define U64STR_SIZE 22 /* 20 digits max */
++
++#define VERSION "1.0.0"
++#define BANNER "hwlat_detector: "
++#define DRVNAME "hwlat_detector"
++#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */
++#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
++#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
++
++/* Module metadata */
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jon Masters <jcm@redhat.com>");
++MODULE_DESCRIPTION("A simple hardware latency detector");
++MODULE_VERSION(VERSION);
++
++/* Module parameters */
++
++static int debug;
++static int enabled;
++static int threshold;
++
++module_param(debug, int, 0); /* enable debug */
++module_param(enabled, int, 0); /* enable detector */
++module_param(threshold, int, 0); /* latency threshold */
++
++/* Buffering and sampling */
++
++static struct ring_buffer *ring_buffer; /* sample buffer */
++static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */
++static unsigned long buf_size = BUF_SIZE_DEFAULT;
++static struct task_struct *kthread; /* sampling thread */
++
++/* DebugFS filesystem entries */
++
++static struct dentry *debug_dir; /* debugfs directory */
++static struct dentry *debug_max; /* maximum TSC delta */
++static struct dentry *debug_count; /* total detect count */
++static struct dentry *debug_sample_width; /* sample width us */
++static struct dentry *debug_sample_window; /* sample window us */
++static struct dentry *debug_sample; /* raw samples us */
++static struct dentry *debug_threshold; /* threshold us */
++static struct dentry *debug_enable; /* enable/disable */
++
++/* Individual samples and global state */
++
++struct sample; /* latency sample */
++struct data; /* Global state */
++
++/* Sampling functions */
++static int __buffer_add_sample(struct sample *sample);
++static struct sample *buffer_get_sample(struct sample *sample);
++
++/* Threading and state */
++static int kthread_fn(void *unused);
++static int start_kthread(void);
++static int stop_kthread(void);
++static void __reset_stats(void);
++static int init_stats(void);
++
++/* Debugfs interface */
++static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos, const u64 *entry);
++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos, u64 *entry);
++static int debug_sample_fopen(struct inode *inode, struct file *filp);
++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos);
++static int debug_sample_release(struct inode *inode, struct file *filp);
++static int debug_enable_fopen(struct inode *inode, struct file *filp);
++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos);
++static ssize_t debug_enable_fwrite(struct file *file,
++ const char __user *user_buffer,
++ size_t user_size, loff_t *offset);
++
++/* Initialization functions */
++static int init_debugfs(void);
++static void free_debugfs(void);
++static int detector_init(void);
++static void detector_exit(void);
++
++/* Individual latency samples are stored here when detected and packed into
++ * the ring_buffer circular buffer, where they are overwritten when
++ * more than buf_size/sizeof(sample) samples are received. */
++struct sample {
++ u64 seqnum; /* unique sequence */
++ u64 duration; /* ktime delta */
++ u64 outer_duration; /* ktime delta (outer loop) */
++ struct timespec timestamp; /* wall time */
++ unsigned long lost;
++};
++
++/* keep the global state somewhere. */
++static struct data {
++
++ struct mutex lock; /* protect changes */
++
++ u64 count; /* total since reset */
++ u64 max_sample; /* max hardware latency */
++ u64 threshold; /* sample threshold level */
++
++ u64 sample_window; /* total sampling window (on+off) */
++ u64 sample_width; /* active sampling portion of window */
++
++ atomic_t sample_open; /* whether the sample file is open */
++
++ wait_queue_head_t wq; /* waitqueue for new sample values */
++
++} data;
++
++/**
++ * __buffer_add_sample - add a new latency sample recording to the ring buffer
++ * @sample: The new latency sample value
++ *
++ * This receives a new latency sample and records it in a global ring buffer.
++ * No additional locking is used in this case.
++ */
++static int __buffer_add_sample(struct sample *sample)
++{
++ return ring_buffer_write(ring_buffer,
++ sizeof(struct sample), sample);
++}
++
++/**
++ * buffer_get_sample - remove a hardware latency sample from the ring buffer
++ * @sample: Pre-allocated storage for the sample
++ *
++ * This retrieves a hardware latency sample from the global circular buffer
++ */
++static struct sample *buffer_get_sample(struct sample *sample)
++{
++ struct ring_buffer_event *e = NULL;
++ struct sample *s = NULL;
++ unsigned int cpu = 0;
++
++ if (!sample)
++ return NULL;
++
++ mutex_lock(&ring_buffer_mutex);
++ for_each_online_cpu(cpu) {
++ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost);
++ if (e)
++ break;
++ }
++
++ if (e) {
++ s = ring_buffer_event_data(e);
++ memcpy(sample, s, sizeof(struct sample));
++ } else
++ sample = NULL;
++ mutex_unlock(&ring_buffer_mutex);
++
++ return sample;
++}
++
++#ifndef CONFIG_TRACING
++#define time_type ktime_t
++#define time_get() ktime_get()
++#define time_to_us(x) ktime_to_us(x)
++#define time_sub(a, b) ktime_sub(a, b)
++#define init_time(a, b) (a).tv64 = b
++#define time_u64(a) ((a).tv64)
++#else
++#define time_type u64
++#define time_get() trace_clock_local()
++#define time_to_us(x) div_u64(x, 1000)
++#define time_sub(a, b) ((a) - (b))
++#define init_time(a, b) (a = b)
++#define time_u64(a) a
++#endif
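++/*
++ * Note on the helpers above: with CONFIG_TRACING the detector samples
++ * trace_clock_local(), which already yields a u64 in nanoseconds;
++ * otherwise it falls back to ktime_get(). time_to_us() normalizes
++ * either representation to microseconds for the comparisons below.
++ */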
++/**
++ * get_sample - sample the CPU TSC and look for likely hardware latencies
++ *
++ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
++ * hardware-induced latency. Called with interrupts disabled and with
++ * data.lock held.
++ */
++static int get_sample(void)
++{
++ time_type start, t1, t2, last_t2;
++ s64 diff, total = 0;
++ u64 sample = 0;
++ u64 outer_sample = 0;
++ int ret = -1;
++
++ init_time(last_t2, 0);
++ start = time_get(); /* start timestamp */
++
++ do {
++
++ t1 = time_get(); /* we'll look for a discontinuity */
++ t2 = time_get();
++
++ if (time_u64(last_t2)) {
++ /* Check the delta from outer loop (t2 to next t1) */
++ diff = time_to_us(time_sub(t1, last_t2));
++ /* This shouldn't happen */
++ if (diff < 0) {
++ pr_err(BANNER "time running backwards\n");
++ goto out;
++ }
++ if (diff > outer_sample)
++ outer_sample = diff;
++ }
++ last_t2 = t2;
++
++ total = time_to_us(time_sub(t2, start)); /* sample width */
++
++ /* This checks the inner loop (t1 to t2) */
++ diff = time_to_us(time_sub(t2, t1)); /* current diff */
++
++ /* This shouldn't happen */
++ if (diff < 0) {
++ pr_err(BANNER "time running backwards\n");
++ goto out;
++ }
++
++ if (diff > sample)
++ sample = diff; /* only want highest value */
++
++ } while (total <= data.sample_width);
++
++ ret = 0;
++
++ /* If we exceed the threshold value, we have found a hardware latency */
++ if (sample > data.threshold || outer_sample > data.threshold) {
++ struct sample s;
++
++ ret = 1;
++
++ data.count++;
++ s.seqnum = data.count;
++ s.duration = sample;
++ s.outer_duration = outer_sample;
++ s.timestamp = CURRENT_TIME;
++ __buffer_add_sample(&s);
++
++ /* Keep a running maximum ever recorded hardware latency */
++ if (sample > data.max_sample)
++ data.max_sample = sample;
++ }
++
++out:
++ return ret;
++}
++
++/*
++ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
++ * @unused: A required part of the kthread API.
++ *
++ * Used to periodically sample the CPU TSC via a call to get_sample. We
++ * disable interrupts, which does (intentionally) introduce latency since we
++ * need to ensure nothing else might be running (and thus pre-empting).
++ * Obviously this should never be used in production environments.
++ *
++ * Currently this runs on whichever CPU it was scheduled on, but most
++ * real-world hardware latency situations occur across several CPUs,
++ * so we might later generalize this if we find there are any actual
++ * systems with alternate SMI delivery or other hardware latencies.
++ */
++static int kthread_fn(void *unused)
++{
++ int ret;
++ u64 interval;
++
++ while (!kthread_should_stop()) {
++
++ mutex_lock(&data.lock);
++
++ local_irq_disable();
++ ret = get_sample();
++ local_irq_enable();
++
++ if (ret > 0)
++ wake_up(&data.wq); /* wake up reader(s) */
++
++ interval = data.sample_window - data.sample_width;
++ do_div(interval, USEC_PER_MSEC); /* modifies interval value */
++
++ mutex_unlock(&data.lock);
++
++ if (msleep_interruptible(interval))
++ break;
++ }
++
++ return 0;
++}
++
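++/*
++ * With the defaults above (sample_window = 1,000,000 us, sample_width =
++ * 500,000 us) each pass busy-samples for 0.5 s, then sleeps for
++ * (1000000 - 500000) / USEC_PER_MSEC = 500 ms until the next window.
++ */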
++/**
++ * start_kthread - Kick off the hardware latency sampling/detector kthread
++ *
++ * This starts a kernel thread that will sit and sample the CPU timestamp
++ * counter (TSC or similar) and look for potential hardware latencies.
++ */
++static int start_kthread(void)
++{
++ kthread = kthread_run(kthread_fn, NULL,
++ DRVNAME);
++ if (IS_ERR(kthread)) {
++ pr_err(BANNER "could not start sampling thread\n");
++ enabled = 0;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++/**
++ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
++ *
++ * This kicks the running hardware latency sampling/detector kernel thread and
++ * tells it to stop sampling now. Use this on unload and at system shutdown.
++ */
++static int stop_kthread(void)
++{
++ int ret;
++
++ ret = kthread_stop(kthread);
++
++ return ret;
++}
++
++/**
++ * __reset_stats - Reset statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We call this
++ * function in order to reset those when "enable" is toggled on or off, and
++ * also at initialization. Should be called with data.lock held.
++ */
++static void __reset_stats(void)
++{
++ data.count = 0;
++ data.max_sample = 0;
++ ring_buffer_reset(ring_buffer); /* flush out old sample entries */
++}
++
++/**
++ * init_stats - Setup global state statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We also use
++ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
++ * induced system latencies. This function initializes these structures and
++ * allocates the global ring buffer also.
++ */
++static int init_stats(void)
++{
++ int ret = -ENOMEM;
++
++ mutex_init(&data.lock);
++ init_waitqueue_head(&data.wq);
++ atomic_set(&data.sample_open, 0);
++
++ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
++
++ if (WARN(!ring_buffer, KERN_ERR BANNER
++ "failed to allocate ring buffer!\n"))
++ goto out;
++
++ __reset_stats();
++ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */
++ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
++ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */
++
++ ret = 0;
++
++out:
++ return ret;
++
++}
++
++/*
++ * simple_data_read - Wrapper read function for global state debugfs entries
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ * @entry: The entry to read from
++ *
++ * This function provides a generic read implementation for the global state
++ * "data" structure debugfs filesystem entries. It would be nice to use
++ * simple_attr_read directly, but we need to make sure that the data.lock
++ * is held during the actual read.
++ */
++static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos, const u64 *entry)
++{
++ char buf[U64STR_SIZE];
++ u64 val = 0;
++ int len = 0;
++
++ memset(buf, 0, sizeof(buf));
++
++ if (!entry)
++ return -EFAULT;
++
++ mutex_lock(&data.lock);
++ val = *entry;
++ mutex_unlock(&data.lock);
++
++ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
++
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
++
++}
++
++/*
++ * simple_data_write - Wrapper write function for global state debugfs entries
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to write value from
++ * @cnt: The maximum number of bytes to write
++ * @ppos: The current "file" position
++ * @entry: The entry to write to
++ *
++ * This function provides a generic write implementation for the global state
++ * "data" structure debugfs filesystem entries. It would be nice to use
++ * simple_attr_write directly, but we need to make sure that the data.lock
++ * is held during the actual write.
++ */
++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos, u64 *entry)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = kstrtoull(buf, 10, &val);
++ if (err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ *entry = val;
++ mutex_unlock(&data.lock);
++
++ return csize;
++}
++
++/**
++ * debug_count_fopen - Open function for "count" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "count" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_count_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_count_fread - Read function for "count" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "count" debugfs
++ * interface to the hardware latency detector. Can be used to read the
++ * number of latency readings exceeding the configured threshold since
++ * the detector was last reset (e.g. by writing a zero into "count").
++ */
++static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
++}
++
++/**
++ * debug_count_fwrite - Write function for "count" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "count" debugfs
++ * interface to the hardware latency detector. Can be used to write a
++ * desired value, especially to zero the total count.
++ */
++static ssize_t debug_count_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
++}
++
++/**
++ * debug_enable_fopen - Dummy open function for "enable" debugfs interface
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "enable" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_enable_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_enable_fread - Read function for "enable" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "enable" debugfs
++ * interface to the hardware latency detector. Can be used to determine
++ * whether the detector is currently enabled ("0\n" or "1\n" returned).
++ */
++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ char buf[4];
++
++ if ((cnt < sizeof(buf)) || (*ppos))
++ return 0;
++
++ buf[0] = enabled ? '1' : '0';
++ buf[1] = '\n';
++ buf[2] = '\0';
++ if (copy_to_user(ubuf, buf, strlen(buf)))
++ return -EFAULT;
++ return *ppos = strlen(buf);
++}
++
++/**
++ * debug_enable_fwrite - Write function for "enable" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "enable" debugfs
++ * interface to the hardware latency detector. Can be used to enable or
++ * disable the detector, which will have the side-effect of possibly
++ * also resetting the global stats and kicking off the measuring
++ * kthread (on an enable) or the converse (upon a disable).
++ */
++static ssize_t debug_enable_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[4];
++ int csize = min(cnt, sizeof(buf));
++ long val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[sizeof(buf)-1] = '\0'; /* just in case */
++ err = kstrtoul(buf, 10, &val);
++ if (err)
++ return -EINVAL;
++
++ if (val) {
++ if (enabled)
++ goto unlock;
++ enabled = 1;
++ __reset_stats();
++ if (start_kthread())
++ return -EFAULT;
++ } else {
++ if (!enabled)
++ goto unlock;
++ enabled = 0;
++ err = stop_kthread();
++ if (err) {
++ pr_err(BANNER "cannot stop kthread\n");
++ return -EFAULT;
++ }
++ wake_up(&data.wq); /* reader(s) should return */
++ }
++unlock:
++ return csize;
++}
++
++/**
++ * debug_max_fopen - Open function for "max" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "max" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_max_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_max_fread - Read function for "max" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "max" debugfs
++ * interface to the hardware latency detector. Can be used to determine
++ * the maximum latency value observed since it was last reset.
++ */
++static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
++}
++
++/**
++ * debug_max_fwrite - Write function for "max" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "max" debugfs
++ * interface to the hardware latency detector. Can be used to reset the
++ * maximum or set it to some other desired value - if, then, subsequent
++ * measurements exceed this value, the maximum will be updated.
++ */
++static ssize_t debug_max_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
++}
++
++
++/**
++ * debug_sample_fopen - An open function for "sample" debugfs interface
++ * @inode: The in-kernel inode representation of this debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function handles opening the "sample" file within the hardware
++ * latency detector debugfs directory interface. This file is used to read
++ * raw samples from the global ring_buffer and allows the user to see a
++ * running latency history. Can be opened blocking or non-blocking,
++ * which determines whether a read behaves as a blocking pipe or
++ * returns immediately. Implements simple locking to prevent multiple
++ * simultaneous use.
++ */
++static int debug_sample_fopen(struct inode *inode, struct file *filp)
++{
++ if (!atomic_add_unless(&data.sample_open, 1, 1))
++ return -EBUSY;
++ else
++ return 0;
++}
++
++/**
++ * debug_sample_fread - A read function for "sample" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that will contain the samples read
++ * @cnt: The maximum bytes to read from the debugfs "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function handles reading from the "sample" file within the hardware
++ * latency detector debugfs directory interface. This file is used to read
++ * raw samples from the global ring_buffer and allows the user to see a
++ * running latency history. By default this will block pending a new
++ * value written into the sample buffer, unless there are already a
++ * number of value(s) waiting in the buffer, or the sample file was
++ * previously opened in a non-blocking mode of operation.
++ */
++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ int len = 0;
++ char buf[64];
++ struct sample *sample = NULL;
++
++ if (!enabled)
++ return 0;
++
++ sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
++ if (!sample)
++ return -ENOMEM;
++
++ while (!buffer_get_sample(sample)) {
++
++ DEFINE_WAIT(wait);
++
++ if (filp->f_flags & O_NONBLOCK) {
++ len = -EAGAIN;
++ goto out;
++ }
++
++ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
++ schedule();
++ finish_wait(&data.wq, &wait);
++
++ if (signal_pending(current)) {
++ len = -EINTR;
++ goto out;
++ }
++
++ if (!enabled) { /* enable was toggled */
++ len = 0;
++ goto out;
++ }
++ }
++
++ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
++ sample->timestamp.tv_sec,
++ sample->timestamp.tv_nsec,
++ sample->duration,
++ sample->outer_duration);
++
++
++ /* handling partial reads is more trouble than it's worth */
++ if (len > cnt)
++ goto out;
++
++ if (copy_to_user(ubuf, buf, len))
++ len = -EFAULT;
++
++out:
++ kfree(sample);
++ return len;
++}
++
++/**
++ * debug_sample_release - Release function for "sample" debugfs interface
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function completes the close of the debugfs interface "sample" file.
++ * Frees the sample_open "lock" so that other users may open the interface.
++ */
++static int debug_sample_release(struct inode *inode, struct file *filp)
++{
++ atomic_dec(&data.sample_open);
++
++ return 0;
++}
++
++/**
++ * debug_threshold_fopen - Open function for "threshold" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "threshold" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_threshold_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_threshold_fread - Read function for "threshold" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "threshold" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * the current threshold level at which a latency will be recorded in the
++ * global ring buffer, typically on the order of 10us.
++ */
++static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
++}
++
++/**
++ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "threshold" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * the threshold level at which any subsequently detected latencies will
++ * be recorded into the global ring buffer.
++ */
++static ssize_t debug_threshold_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ int ret;
++
++ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
++
++ if (enabled)
++ wake_up_process(kthread);
++
++ return ret;
++}
++
++/**
++ * debug_width_fopen - Open function for "width" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "width" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_width_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_width_fread - Read function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * for how many us of the total window we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly and have the system respond to a sample reader, or,
++ * worse, without having the system appear to have gone out to lunch.
++ */
++static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
++}
++
++/**
++ * debug_width_fwrite - Write function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * for how many us of the total window we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly and have the system respond to a sample reader, or,
++ * worse, without having the system appear to have gone out to lunch. It
++ * is enforced that width is less than the total window size.
++ */
++static ssize_t debug_width_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = kstrtoull(buf, 10, &val);
++ if (err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ if (val < data.sample_window)
++ data.sample_width = val;
++ else {
++ mutex_unlock(&data.lock);
++ return -EINVAL;
++ }
++ mutex_unlock(&data.lock);
++
++ if (enabled)
++ wake_up_process(kthread);
++
++ return csize;
++}
++
++/**
++ * debug_window_fopen - Open function for "window" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs.
++ */
++static int debug_window_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_window_fread - Read function for "window" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs. Can be used to read the total window size.
++ */
++static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
++}
++
++/**
++ * debug_window_fwrite - Write function for "window" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs. Can be used to write a new total window size. It
++ * is enforced that any value written must be greater than the sample width
++ * size, or an error results.
++ */
++static ssize_t debug_window_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = kstrtoull(buf, 10, &val);
++ if (err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ if (data.sample_width < val)
++ data.sample_window = val;
++ else {
++ mutex_unlock(&data.lock);
++ return -EINVAL;
++ }
++ mutex_unlock(&data.lock);
++
++ return csize;
++}
++
++/*
++ * Function pointers for the "count" debugfs file operations
++ */
++static const struct file_operations count_fops = {
++ .open = debug_count_fopen,
++ .read = debug_count_fread,
++ .write = debug_count_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "enable" debugfs file operations
++ */
++static const struct file_operations enable_fops = {
++ .open = debug_enable_fopen,
++ .read = debug_enable_fread,
++ .write = debug_enable_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "max" debugfs file operations
++ */
++static const struct file_operations max_fops = {
++ .open = debug_max_fopen,
++ .read = debug_max_fread,
++ .write = debug_max_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "sample" debugfs file operations
++ */
++static const struct file_operations sample_fops = {
++ .open = debug_sample_fopen,
++ .read = debug_sample_fread,
++ .release = debug_sample_release,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "threshold" debugfs file operations
++ */
++static const struct file_operations threshold_fops = {
++ .open = debug_threshold_fopen,
++ .read = debug_threshold_fread,
++ .write = debug_threshold_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "width" debugfs file operations
++ */
++static const struct file_operations width_fops = {
++ .open = debug_width_fopen,
++ .read = debug_width_fread,
++ .write = debug_width_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "window" debugfs file operations
++ */
++static const struct file_operations window_fops = {
++ .open = debug_window_fopen,
++ .read = debug_window_fread,
++ .write = debug_window_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/**
++ * init_debugfs - A function to initialize the debugfs interface files
++ *
++ * This function creates entries in debugfs for "hwlat_detector", including
++ * files to read values from the detector, current samples, and the
++ * maximum sample that has been captured since the hardware latency
++ * detector was started.
++ */
++static int init_debugfs(void)
++{
++ int ret = -ENOMEM;
++
++ debug_dir = debugfs_create_dir(DRVNAME, NULL);
++ if (!debug_dir)
++ goto err_debug_dir;
++
++ debug_sample = debugfs_create_file("sample", 0444,
++ debug_dir, NULL,
++ &sample_fops);
++ if (!debug_sample)
++ goto err_sample;
++
++ debug_count = debugfs_create_file("count", 0444,
++ debug_dir, NULL,
++ &count_fops);
++ if (!debug_count)
++ goto err_count;
++
++ debug_max = debugfs_create_file("max", 0444,
++ debug_dir, NULL,
++ &max_fops);
++ if (!debug_max)
++ goto err_max;
++
++ debug_sample_window = debugfs_create_file("window", 0644,
++ debug_dir, NULL,
++ &window_fops);
++ if (!debug_sample_window)
++ goto err_window;
++
++ debug_sample_width = debugfs_create_file("width", 0644,
++ debug_dir, NULL,
++ &width_fops);
++ if (!debug_sample_width)
++ goto err_width;
++
++ debug_threshold = debugfs_create_file("threshold", 0644,
++ debug_dir, NULL,
++ &threshold_fops);
++ if (!debug_threshold)
++ goto err_threshold;
++
++ debug_enable = debugfs_create_file("enable", 0644,
++ debug_dir, &enabled,
++ &enable_fops);
++ if (!debug_enable)
++ goto err_enable;
++
++ else {
++ ret = 0;
++ goto out;
++ }
++
++err_enable:
++ debugfs_remove(debug_threshold);
++err_threshold:
++ debugfs_remove(debug_sample_width);
++err_width:
++ debugfs_remove(debug_sample_window);
++err_window:
++ debugfs_remove(debug_max);
++err_max:
++ debugfs_remove(debug_count);
++err_count:
++ debugfs_remove(debug_sample);
++err_sample:
++ debugfs_remove(debug_dir);
++err_debug_dir:
++out:
++ return ret;
++}
++
++/**
++ * free_debugfs - A function to cleanup the debugfs file interface
++ */
++static void free_debugfs(void)
++{
++ /* could also use a debugfs_remove_recursive */
++ debugfs_remove(debug_enable);
++ debugfs_remove(debug_threshold);
++ debugfs_remove(debug_sample_width);
++ debugfs_remove(debug_sample_window);
++ debugfs_remove(debug_max);
++ debugfs_remove(debug_count);
++ debugfs_remove(debug_sample);
++ debugfs_remove(debug_dir);
++}
++
++/**
++ * detector_init - Standard module initialization code
++ */
++static int detector_init(void)
++{
++ int ret = -ENOMEM;
++
++ pr_info(BANNER "version %s\n", VERSION);
++
++ ret = init_stats();
++ if (ret)
++ goto out;
++
++ ret = init_debugfs();
++ if (ret)
++ goto err_stats;
++
++ if (enabled)
++ ret = start_kthread();
++
++ goto out;
++
++err_stats:
++ ring_buffer_free(ring_buffer);
++out:
++ return ret;
++
++}
++
++/**
++ * detector_exit - Standard module cleanup code
++ */
++static void detector_exit(void)
++{
++ int err;
++
++ if (enabled) {
++ enabled = 0;
++ err = stop_kthread();
++ if (err)
++ pr_err(BANNER "cannot stop kthread\n");
++ }
++
++ free_debugfs();
++ ring_buffer_free(ring_buffer); /* free up the ring buffer */
++
++}
++
++module_init(detector_init);
++module_exit(detector_exit);
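Once loaded, the detector is driven entirely through debugfs under hwlat_detector/: writing a non-zero value to "enable" resets the statistics and starts the sampling kthread, the "threshold", "window" and "width" files tune the parameters in microseconds, and each line read from "sample" reports one detected event as a timestamp plus its inner- and outer-loop latencies.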
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index df990bb8c873..1a162709a85e 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1147,15 +1147,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ struct variant_data *variant = host->variant;
+ void __iomem *base = host->base;
+- unsigned long flags;
+ u32 status;
+
+ status = readl(base + MMCISTATUS);
+
+ dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
+
+- local_irq_save(flags);
+-
+ do {
+ unsigned int remain, len;
+ char *buffer;
+@@ -1195,8 +1192,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
+
+ sg_miter_stop(sg_miter);
+
+- local_irq_restore(flags);
+-
+ /*
+ * If we have less than the fifo 'half-full' threshold to transfer,
+ * trigger a PIO interrupt as soon as any data is available.
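The local_irq_save()/restore() pair removed here bought nothing: mmci_pio_irq() is an interrupt handler, so on a stock kernel interrupts are already disabled, and on RT the handler runs in a thread where disabling them is neither required nor wanted.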
+diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
+index 25c55ab05c7d..5a1d117a8744 100644
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -842,9 +842,9 @@ static void poll_vortex(struct net_device *dev)
+ {
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned long flags;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ #endif
+
+@@ -1910,12 +1910,12 @@ static void vortex_tx_timeout(struct net_device *dev)
+ * Block interrupts because vortex_interrupt does a bare spin_lock()
+ */
+ unsigned long flags;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (vp->full_bus_master_tx)
+ boomerang_interrupt(dev->irq, dev);
+ else
+ vortex_interrupt(dev->irq, dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
+index da4c2d8a4173..1420dfb56bac 100644
+--- a/drivers/net/ethernet/realtek/8139too.c
++++ b/drivers/net/ethernet/realtek/8139too.c
+@@ -2233,7 +2233,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
+ struct rtl8139_private *tp = netdev_priv(dev);
+ const int irq = tp->pci_dev->irq;
+
+- disable_irq(irq);
++ disable_irq_nosync(irq);
+ rtl8139_interrupt(irq, dev);
+ enable_irq(irq);
+ }
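disable_irq() waits for any in-flight handler to finish, which with forced interrupt threading can mean sleeping on the threaded handler; since the poll controller may be entered from atomic netpoll context, the nosync variant, which only masks the interrupt line, is the safe choice here.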
+diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+index 56f109bc8394..02afc796bc71 100644
+--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
+ while (!ctx->done.done && msecs--)
+ udelay(1000);
+ } else {
+- wait_event_interruptible(ctx->done.wait,
++ swait_event_interruptible(ctx->done.wait,
+ ctx->done.done);
+ }
+ break;
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index d11cdbb8fba3..223bbb9acb03 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -672,7 +672,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
+ WARN_ON(!dev->block_cfg_access);
+
+ dev->block_cfg_access = 0;
+- wake_up_all(&pci_cfg_wait);
++ wake_up_all_locked(&pci_cfg_wait);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
+index 9bd41a35a78a..8e2d436c2e3f 100644
+--- a/drivers/scsi/fcoe/fcoe.c
++++ b/drivers/scsi/fcoe/fcoe.c
+@@ -1455,11 +1455,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
+ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
+ {
+ struct fcoe_percpu_s *fps;
+- int rc;
++ int rc, cpu = get_cpu_light();
+
+- fps = &get_cpu_var(fcoe_percpu);
++ fps = &per_cpu(fcoe_percpu, cpu);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
+- put_cpu_var(fcoe_percpu);
++ put_cpu_light();
+
+ return rc;
+ }
+@@ -1646,11 +1646,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
+ return 0;
+ }
+
+- stats = per_cpu_ptr(lport->stats, get_cpu());
++ stats = per_cpu_ptr(lport->stats, get_cpu_light());
+ stats->InvalidCRCCount++;
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+- put_cpu();
++ put_cpu_light();
+ return -EINVAL;
+ }
+
+@@ -1693,7 +1693,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
+ */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+
+- stats = per_cpu_ptr(lport->stats, get_cpu());
++ stats = per_cpu_ptr(lport->stats, get_cpu_light());
+ if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+ if (stats->ErrorFrames < 5)
+ printk(KERN_WARNING "fcoe: FCoE version "
+@@ -1725,13 +1725,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
+ goto drop;
+
+ if (!fcoe_filter_frames(lport, fp)) {
+- put_cpu();
++ put_cpu_light();
+ fc_exch_recv(lport, fp);
+ return;
+ }
+ drop:
+ stats->ErrorFrames++;
+- put_cpu();
++ put_cpu_light();
+ kfree_skb(skb);
+ }
+
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index dcf36537a767..1a1f2e46452c 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -834,7 +834,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
+
+ INIT_LIST_HEAD(&del_list);
+
+- stats = per_cpu_ptr(fip->lp->stats, get_cpu());
++ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());
+
+ list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+ deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
+@@ -870,7 +870,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
+ sel_time = fcf->time;
+ }
+ }
+- put_cpu();
++ put_cpu_light();
+
+ list_for_each_entry_safe(fcf, next, &del_list, list) {
+ /* Removes fcf from current list */
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index e72673b0a8fb..da598a6caa22 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -814,10 +814,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
+ }
+ memset(ep, 0, sizeof(*ep));
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ pool = per_cpu_ptr(mp->pool, cpu);
+ spin_lock_bh(&pool->lock);
+- put_cpu();
++ put_cpu_light();
+
+ /* peek cache of free slot */
+ if (pool->left != FC_XID_UNKNOWN) {
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 763f012fdeca..d0f61b595470 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -190,7 +190,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+ /* TODO: audit callers to ensure they are ready for qc_issue to
+ * unconditionally re-enable interrupts
+ */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ spin_unlock(ap->lock);
+
+ /* If the device fell off, no sense in issuing commands */
+@@ -252,7 +252,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+
+ out:
+ spin_lock(ap->lock);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return ret;
+ }
+
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index edc48f3b8230..ee5c6f9dfb6f 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -59,12 +59,12 @@ qla2x00_poll(struct rsp_que *rsp)
+ {
+ unsigned long flags;
+ struct qla_hw_data *ha = rsp->hw;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (IS_P3P_TYPE(ha))
+ qla82xx_poll(0, rsp);
+ else
+ ha->isp_ops->intr_handler(0, rsp);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ static inline uint8_t *
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 987f1c729e9c..18391e07d70f 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3125,7 +3125,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ * kref_put().
+ */
+ kref_get(&qentry->irq_notify.kref);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ swork_queue(&qentry->irq_notify.swork);
++#else
+ schedule_work(&qentry->irq_notify.work);
++#endif
+ }
+
+ /*
+diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
+index 97f0a2bd93ed..a4f45aaa9ad4 100644
+--- a/drivers/thermal/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/x86_pkg_temp_thermal.c
+@@ -29,6 +29,7 @@
+ #include <linux/pm.h>
+ #include <linux/thermal.h>
+ #include <linux/debugfs.h>
++#include <linux/swork.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/mce.h>
+
+@@ -352,7 +353,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
+ }
+ }
+
+-static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
++static void platform_thermal_notify_work(struct swork_event *event)
+ {
+ unsigned long flags;
+ int cpu = smp_processor_id();
+@@ -369,7 +370,7 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
+ pkg_work_scheduled[phy_id]) {
+ disable_pkg_thres_interrupt();
+ spin_unlock_irqrestore(&pkg_work_lock, flags);
+- return -EINVAL;
++ return;
+ }
+ pkg_work_scheduled[phy_id] = 1;
+ spin_unlock_irqrestore(&pkg_work_lock, flags);
+@@ -378,9 +379,48 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
+ schedule_delayed_work_on(cpu,
+ &per_cpu(pkg_temp_thermal_threshold_work, cpu),
+ msecs_to_jiffies(notify_delay_ms));
++}
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++static struct swork_event notify_work;
++
++static int thermal_notify_work_init(void)
++{
++ int err;
++
++ err = swork_get();
++ if (err)
++ return err;
++
++ INIT_SWORK(&notify_work, platform_thermal_notify_work);
+ return 0;
+ }
+
++static void thermal_notify_work_cleanup(void)
++{
++ swork_put();
++}
++
++static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
++{
++ swork_queue(&notify_work);
++ return 0;
++}
++
++#else /* !CONFIG_PREEMPT_RT_FULL */
++
++static int thermal_notify_work_init(void) { return 0; }
++
++static void thermal_notify_work_cleanup(void) { }
++
++static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
++{
++ platform_thermal_notify_work(NULL);
++
++ return 0;
++}
++#endif /* CONFIG_PREEMPT_RT_FULL */
++
+ static int find_siblings_cpu(int cpu)
+ {
+ int i;
+@@ -584,6 +624,9 @@ static int __init pkg_temp_thermal_init(void)
+ if (!x86_match_cpu(pkg_temp_thermal_ids))
+ return -ENODEV;
+
++	if (thermal_notify_work_init())
++ return -ENODEV;
++
+ spin_lock_init(&pkg_work_lock);
+ platform_thermal_package_notify =
+ pkg_temp_thermal_platform_thermal_notify;
+@@ -608,7 +651,7 @@ static int __init pkg_temp_thermal_init(void)
+ kfree(pkg_work_scheduled);
+ platform_thermal_package_notify = NULL;
+ platform_thermal_package_rate_control = NULL;
+-
++ thermal_notify_work_cleanup();
+ return -ENODEV;
+ }
+
+@@ -633,6 +676,7 @@ static void __exit pkg_temp_thermal_exit(void)
+ mutex_unlock(&phy_dev_list_mutex);
+ platform_thermal_package_notify = NULL;
+ platform_thermal_package_rate_control = NULL;
++ thermal_notify_work_cleanup();
+ for_each_online_cpu(i)
+ cancel_delayed_work_sync(
+ &per_cpu(pkg_temp_thermal_threshold_work, i));
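A compact sketch of the swork conversion pattern applied in the thermal driver above, assuming the swork API this patch introduces in linux/swork.h; my_event, my_handler and the callers are illustrative names:

static struct swork_event my_event;

static void my_handler(struct swork_event *sev)
{
	/* runs in the preemptible swork thread, not in hardirq context */
}

static int my_init(void)
{
	int err = swork_get();		/* take a reference on the swork thread */

	if (err)
		return err;
	INIT_SWORK(&my_event, my_handler);
	return 0;
}

static void my_notify(void)		/* e.g. called from an interrupt path */
{
	swork_queue(&my_event);
}

static void my_cleanup(void)
{
	swork_put();			/* drop the reference taken in my_init() */
}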
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index dcf43f66404f..a9ae57122841 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -58,7 +58,16 @@ static struct uart_driver serial8250_reg;
+
+ static unsigned int skip_txen_test; /* force skip of txen test at init time */
+
+-#define PASS_LIMIT 512
++/*
++ * On -rt we can legitimately have more delays, so don't
++ * drop work spuriously and spam the
++ * syslog:
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define PASS_LIMIT 1000000
++#else
++# define PASS_LIMIT 512
++#endif
+
+ #include <asm/serial.h>
+ /*
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 858a54633664..fc44fb59aef6 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -35,6 +35,7 @@
+ #include <linux/nmi.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/kdb.h>
+ #include <linux/uaccess.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/timer.h>
+@@ -3109,9 +3110,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+
+ serial8250_rpm_get(up);
+
+- if (port->sysrq)
++ if (port->sysrq || oops_in_progress)
+ locked = 0;
+- else if (oops_in_progress)
++ else if (in_kdb_printk())
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 8a9e213387a7..dd1f9a426b74 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -2167,13 +2167,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+
+ clk_enable(uap->clk);
+
+- local_irq_save(flags);
++ /*
++ * local_irq_save(flags);
++ *
++ * This local_irq_save() is nonsense. If we come in via sysrq
++	 * handling then interrupts are already disabled. Aside from
++	 * that, the port.sysrq check is racy on SMP regardless.
++ */
+ if (uap->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&uap->port.lock);
++ locked = spin_trylock_irqsave(&uap->port.lock, flags);
+ else
+- spin_lock(&uap->port.lock);
++ spin_lock_irqsave(&uap->port.lock, flags);
+
+ /*
+ * First save the CR then disable the interrupts
+@@ -2197,8 +2203,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+ pl011_write(old_cr, uap, REG_CR);
+
+ if (locked)
+- spin_unlock(&uap->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&uap->port.lock, flags);
+
+ clk_disable(uap->clk);
+ }
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index a2a529994ba5..0ee7c4c518df 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console *co, const char *s,
+
+ pm_runtime_get_sync(up->dev);
+
+- local_irq_save(flags);
+- if (up->port.sysrq)
+- locked = 0;
+- else if (oops_in_progress)
+- locked = spin_trylock(&up->port.lock);
++ if (up->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+- spin_lock(&up->port.lock);
++ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -1292,8 +1289,7 @@ serial_omap_console_write(struct console *co, const char *s,
+ pm_runtime_mark_last_busy(up->dev);
+ pm_runtime_put_autosuspend(up->dev);
+ if (locked)
+- spin_unlock(&up->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+
+ static int __init
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index f36e6df2fa90..e086ea4d2997 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1240,7 +1240,7 @@ static int sc16is7xx_probe(struct device *dev,
+
+ /* Setup interrupt */
+ ret = devm_request_irq(dev, irq, sc16is7xx_irq,
+- IRQF_ONESHOT | flags, dev_name(dev), s);
++ flags, dev_name(dev), s);
+ if (!ret)
+ return 0;
+
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index d2e3f655c26f..fdd027a9bbd7 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1760,9 +1760,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
+ * and no one may trigger the above deadlock situation when
+ * running complete() in tasklet.
+ */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ urb->complete(urb);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ usb_anchor_resume_wakeups(anchor);
+ atomic_dec(&urb->use_count);
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 5c8429f23a89..fa835fb1a186 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1509,7 +1509,7 @@ static void ffs_data_put(struct ffs_data *ffs)
+ pr_info("%s(): freeing\n", __func__);
+ ffs_data_clear(ffs);
+ BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
+- waitqueue_active(&ffs->ep0req_completion.wait));
++ swait_active(&ffs->ep0req_completion.wait));
+ kfree(ffs->dev_name);
+ kfree(ffs);
+ }
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index 16104b5ebdcb..5c506c2b88ad 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -346,7 +346,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
+ spin_unlock_irq (&epdata->dev->lock);
+
+ if (likely (value == 0)) {
+- value = wait_event_interruptible (done.wait, done.done);
++ value = swait_event_interruptible (done.wait, done.done);
+ if (value != 0) {
+ spin_lock_irq (&epdata->dev->lock);
+ if (likely (epdata->ep != NULL)) {
+@@ -355,7 +355,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
+ usb_ep_dequeue (epdata->ep, epdata->req);
+ spin_unlock_irq (&epdata->dev->lock);
+
+- wait_event (done.wait, done.done);
++ swait_event (done.wait, done.done);
+ if (epdata->status == -ECONNRESET)
+ epdata->status = -EINTR;
+ } else {
+diff --git a/fs/aio.c b/fs/aio.c
+index 4fe81d1c60f9..e68c06a4a017 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -40,6 +40,7 @@
+ #include <linux/ramfs.h>
+ #include <linux/percpu-refcount.h>
+ #include <linux/mount.h>
++#include <linux/swork.h>
+
+ #include <asm/kmap_types.h>
+ #include <asm/uaccess.h>
+@@ -115,7 +116,7 @@ struct kioctx {
+ struct page **ring_pages;
+ long nr_pages;
+
+- struct work_struct free_work;
++ struct swork_event free_work;
+
+ /*
+ * signals when all in-flight requests are done
+@@ -258,6 +259,7 @@ static int __init aio_setup(void)
+ .mount = aio_mount,
+ .kill_sb = kill_anon_super,
+ };
++ BUG_ON(swork_get());
+ aio_mnt = kern_mount(&aio_fs);
+ if (IS_ERR(aio_mnt))
+ panic("Failed to create aio fs mount.");
+@@ -578,9 +580,9 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
+ return cancel(&kiocb->common);
+ }
+
+-static void free_ioctx(struct work_struct *work)
++static void free_ioctx(struct swork_event *sev)
+ {
+- struct kioctx *ctx = container_of(work, struct kioctx, free_work);
++ struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
+
+ pr_debug("freeing %p\n", ctx);
+
+@@ -599,8 +601,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
+ if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
+ complete(&ctx->rq_wait->comp);
+
+- INIT_WORK(&ctx->free_work, free_ioctx);
+- schedule_work(&ctx->free_work);
++ INIT_SWORK(&ctx->free_work, free_ioctx);
++ swork_queue(&ctx->free_work);
+ }
+
+ /*
+@@ -608,9 +610,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
+ * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
+ * now it's safe to cancel any that need to be.
+ */
+-static void free_ioctx_users(struct percpu_ref *ref)
++static void free_ioctx_users_work(struct swork_event *sev)
+ {
+- struct kioctx *ctx = container_of(ref, struct kioctx, users);
++ struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
+ struct aio_kiocb *req;
+
+ spin_lock_irq(&ctx->ctx_lock);
+@@ -629,6 +631,14 @@ static void free_ioctx_users(struct percpu_ref *ref)
+ percpu_ref_put(&ctx->reqs);
+ }
+
++static void free_ioctx_users(struct percpu_ref *ref)
++{
++ struct kioctx *ctx = container_of(ref, struct kioctx, users);
++
++ INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
++ swork_queue(&ctx->free_work);
++}
++
+ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
+ {
+ unsigned i, new_nr;
+diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
+index a439548de785..7c392647d03b 100644
+--- a/fs/autofs4/autofs_i.h
++++ b/fs/autofs4/autofs_i.h
+@@ -30,6 +30,7 @@
+ #include <linux/sched.h>
+ #include <linux/mount.h>
+ #include <linux/namei.h>
++#include <linux/delay.h>
+ #include <asm/current.h>
+ #include <linux/uaccess.h>
+
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index d8e6d421c27f..2e689ab1306b 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -148,7 +148,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev,
+ parent = p->d_parent;
+ if (!spin_trylock(&parent->d_lock)) {
+ spin_unlock(&p->d_lock);
+- cpu_relax();
++ cpu_chill();
+ goto relock;
+ }
+ spin_unlock(&p->d_lock);
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 9c8eb9b6db6a..d15d77f72cf7 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -301,8 +301,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
+ * decide that the page is now completely done.
+ */
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ flags = bh_uptodate_lock_irqsave(first);
+ clear_buffer_async_read(bh);
+ unlock_buffer(bh);
+ tmp = bh;
+@@ -315,8 +314,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
+ }
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(first, flags);
+
+ /*
+ * If none of the buffers had errors and they are all
+@@ -328,9 +326,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
+ return;
+
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
+- return;
++ bh_uptodate_unlock_irqrestore(first, flags);
+ }
+
+ /*
+@@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+ }
+
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ flags = bh_uptodate_lock_irqsave(first);
+
+ clear_buffer_async_write(bh);
+ unlock_buffer(bh);
+@@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+ }
+ tmp = tmp->b_this_page;
+ }
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(first, flags);
+ end_page_writeback(page);
+ return;
+
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
+- return;
++ bh_uptodate_unlock_irqrestore(first, flags);
+ }
+ EXPORT_SYMBOL(end_buffer_async_write);
+
+@@ -3384,6 +3376,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
+ struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
+ if (ret) {
+ INIT_LIST_HEAD(&ret->b_assoc_buffers);
++ buffer_head_init_locks(ret);
+ preempt_disable();
+ __this_cpu_inc(bh_accounting.nr);
+ recalc_bh_state();
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 8f6a2a5863b9..4217828d0b68 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
+ struct inode *inode;
+ struct super_block *sb = parent->d_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 5c7cc953ac81..a9bb31f1c1af 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -19,6 +19,7 @@
+ #include <linux/mm.h>
+ #include <linux/fs.h>
+ #include <linux/fsnotify.h>
++#include <linux/delay.h>
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/hash.h>
+@@ -750,6 +751,8 @@ static inline bool fast_dput(struct dentry *dentry)
+ */
+ void dput(struct dentry *dentry)
+ {
++ struct dentry *parent;
++
+ if (unlikely(!dentry))
+ return;
+
+@@ -788,9 +791,18 @@ void dput(struct dentry *dentry)
+ return;
+
+ kill_it:
+- dentry = dentry_kill(dentry);
+- if (dentry) {
+- cond_resched();
++ parent = dentry_kill(dentry);
++ if (parent) {
++ int r;
++
++ if (parent == dentry) {
++ /* the task with the highest priority won't schedule */
++ r = cond_resched();
++ if (!r)
++ cpu_chill();
++ } else {
++ dentry = parent;
++ }
+ goto repeat;
+ }
+ }
+@@ -2321,7 +2333,7 @@ void d_delete(struct dentry * dentry)
+ if (dentry->d_lockref.count == 1) {
+ if (!spin_trylock(&inode->i_lock)) {
+ spin_unlock(&dentry->d_lock);
+- cpu_relax();
++ cpu_chill();
+ goto again;
+ }
+ dentry->d_flags &= ~DCACHE_CANT_MOUNT;
+@@ -2381,21 +2393,24 @@ static inline void end_dir_add(struct inode *dir, unsigned n)
+
+ static void d_wait_lookup(struct dentry *dentry)
+ {
+- if (d_in_lookup(dentry)) {
+- DECLARE_WAITQUEUE(wait, current);
+- add_wait_queue(dentry->d_wait, &wait);
+- do {
+- set_current_state(TASK_UNINTERRUPTIBLE);
+- spin_unlock(&dentry->d_lock);
+- schedule();
+- spin_lock(&dentry->d_lock);
+- } while (d_in_lookup(dentry));
+- }
++ struct swait_queue __wait;
++
++ if (!d_in_lookup(dentry))
++ return;
++
++ INIT_LIST_HEAD(&__wait.task_list);
++ do {
++ prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
++ spin_unlock(&dentry->d_lock);
++ schedule();
++ spin_lock(&dentry->d_lock);
++ } while (d_in_lookup(dentry));
++ finish_swait(dentry->d_wait, &__wait);
+ }
+
+ struct dentry *d_alloc_parallel(struct dentry *parent,
+ const struct qstr *name,
+- wait_queue_head_t *wq)
++ struct swait_queue_head *wq)
+ {
+ unsigned int hash = name->hash;
+ struct hlist_bl_head *b = in_lookup_hash(parent, hash);
+@@ -2504,7 +2519,7 @@ void __d_lookup_done(struct dentry *dentry)
+ hlist_bl_lock(b);
+ dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
+ __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
+- wake_up_all(dentry->d_wait);
++ swake_up_all(dentry->d_wait);
+ dentry->d_wait = NULL;
+ hlist_bl_unlock(b);
+ INIT_HLIST_NODE(&dentry->d_u.d_alias);
+@@ -3601,6 +3616,11 @@ EXPORT_SYMBOL(d_genocide);
+
+ void __init vfs_caches_init_early(void)
+ {
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
++ INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
++
+ dcache_init_early();
+ inode_init_early();
+ }
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 10db91218933..42af0a06f657 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -510,12 +510,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
+ */
+ static void ep_poll_safewake(wait_queue_head_t *wq)
+ {
+- int this_cpu = get_cpu();
++ int this_cpu = get_cpu_light();
+
+ ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
+ ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
+
+- put_cpu();
++ put_cpu_light();
+ }
+
+ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
+diff --git a/fs/exec.c b/fs/exec.c
+index 6fcfb3f7b137..751370a71ec5 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1012,12 +1012,14 @@ static int exec_mmap(struct mm_struct *mm)
+ }
+ }
+ task_lock(tsk);
++ preempt_disable_rt();
+ active_mm = tsk->active_mm;
+ tsk->mm = mm;
+ tsk->active_mm = mm;
+ activate_mm(active_mm, mm);
+ tsk->mm->vmacache_seqnum = 0;
+ vmacache_flush(tsk);
++ preempt_enable_rt();
+ task_unlock(tsk);
+ if (old_mm) {
+ up_read(&old_mm->mmap_sem);
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 4ff9251e9d3a..8fe489ec2ef1 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1174,7 +1174,7 @@ static int fuse_direntplus_link(struct file *file,
+ struct inode *dir = d_inode(parent);
+ struct fuse_conn *fc;
+ struct inode *inode;
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ if (!o->nodeid) {
+ /*
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 684996c8a3a4..6e18a06aaabe 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -116,6 +116,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
+ nblocks = jbd2_space_needed(journal);
+ while (jbd2_log_space_left(journal) < nblocks) {
+ write_unlock(&journal->j_state_lock);
++ if (current->plug)
++ io_schedule();
+ mutex_lock(&journal->j_checkpoint_mutex);
+
+ /*
+diff --git a/fs/namei.c b/fs/namei.c
+index adb04146df09..a89dfaf9f209 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1629,7 +1629,7 @@ static struct dentry *lookup_slow(const struct qstr *name,
+ {
+ struct dentry *dentry = ERR_PTR(-ENOENT), *old;
+ struct inode *inode = dir->d_inode;
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ inode_lock_shared(inode);
+ /* Don't go there if it's already dead */
+@@ -3086,7 +3086,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+ struct dentry *dentry;
+ int error, create_error = 0;
+ umode_t mode = op->mode;
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ if (unlikely(IS_DEADDIR(dir_inode)))
+ return -ENOENT;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 7bb2cda3bfef..cf79b18e7b58 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -14,6 +14,7 @@
+ #include <linux/mnt_namespace.h>
+ #include <linux/user_namespace.h>
+ #include <linux/namei.h>
++#include <linux/delay.h>
+ #include <linux/security.h>
+ #include <linux/idr.h>
+ #include <linux/init.h> /* init_rootfs */
+@@ -353,8 +354,11 @@ int __mnt_want_write(struct vfsmount *m)
+ * incremented count after it has set MNT_WRITE_HOLD.
+ */
+ smp_mb();
+- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
+- cpu_relax();
++ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
++ preempt_enable();
++ cpu_chill();
++ preempt_disable();
++ }
+ /*
+ * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
+ * be set to match its requirements. So we must not load that until
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index b9c65421ed81..03ffe8af8785 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -150,11 +150,11 @@ static int nfs_delegation_claim_opens(struct inode *inode,
+ sp = state->owner;
+ /* Block nfs4_proc_unlck */
+ mutex_lock(&sp->so_delegreturn_mutex);
+- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
++ seq = read_seqbegin(&sp->so_reclaim_seqlock);
+ err = nfs4_open_delegation_recall(ctx, state, stateid, type);
+ if (!err)
+ err = nfs_delegation_claim_locks(ctx, state, stateid);
+- if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
++ if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq))
+ err = -EAGAIN;
+ mutex_unlock(&sp->so_delegreturn_mutex);
+ put_nfs_open_context(ctx);
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 6bc5a68e39f1..ce6488e07a13 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -485,7 +485,7 @@ static
+ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+ {
+ struct qstr filename = QSTR_INIT(entry->name, entry->len);
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ struct dentry *dentry;
+ struct dentry *alias;
+ struct inode *dir = d_inode(parent);
+@@ -1490,7 +1490,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned open_flags,
+ umode_t mode, int *opened)
+ {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ struct nfs_open_context *ctx;
+ struct dentry *res;
+ struct iattr attr = { .ia_valid = ATTR_OPEN };
+@@ -1805,7 +1805,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
+
+ trace_nfs_rmdir_enter(dir, dentry);
+ if (d_really_is_positive(dentry)) {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ down(&NFS_I(d_inode(dentry))->rmdir_sem);
++#else
+ down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
++#endif
+ error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
+ /* Ensure the VFS deletes this inode */
+ switch (error) {
+@@ -1815,7 +1819,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
+ case -ENOENT:
+ nfs_dentry_handle_enoent(dentry);
+ }
++#ifdef CONFIG_PREEMPT_RT_BASE
++ up(&NFS_I(d_inode(dentry))->rmdir_sem);
++#else
+ up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
++#endif
+ } else
+ error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
+ trace_nfs_rmdir_exit(dir, dentry, error);
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index bf4ec5ecc97e..36cd5fc9192c 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1957,7 +1957,11 @@ static void init_once(void *foo)
+ nfsi->nrequests = 0;
+ nfsi->commit_info.ncommit = 0;
+ atomic_set(&nfsi->commit_info.rpcs_out, 0);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ sema_init(&nfsi->rmdir_sem, 1);
++#else
+ init_rwsem(&nfsi->rmdir_sem);
++#endif
+ nfs4_init_once(nfsi);
+ }
+
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index 9bf64eacba5b..041da5cb80f5 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -107,7 +107,7 @@ struct nfs4_state_owner {
+ unsigned long so_flags;
+ struct list_head so_states;
+ struct nfs_seqid_counter so_seqid;
+- seqcount_t so_reclaim_seqcount;
++ seqlock_t so_reclaim_seqlock;
+ struct mutex so_delegreturn_mutex;
+ };
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index a9dec32ba9ba..49b64dfb307c 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2525,7 +2525,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ unsigned int seq;
+ int ret;
+
+- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
++ seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
+
+ ret = _nfs4_proc_open(opendata);
+ if (ret != 0)
+@@ -2561,7 +2561,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ ctx->state = state;
+ if (d_inode(dentry) == state->inode) {
+ nfs_inode_attach_open_context(ctx);
+- if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
++ if (read_seqretry(&sp->so_reclaim_seqlock, seq))
+ nfs4_schedule_stateid_recovery(server, state);
+ }
+ out:
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 8353f33f0466..657e13ed4b5d 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -488,7 +488,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
+ nfs4_init_seqid_counter(&sp->so_seqid);
+ atomic_set(&sp->so_count, 1);
+ INIT_LIST_HEAD(&sp->so_lru);
+- seqcount_init(&sp->so_reclaim_seqcount);
++ seqlock_init(&sp->so_reclaim_seqlock);
+ mutex_init(&sp->so_delegreturn_mutex);
+ return sp;
+ }
+@@ -1459,8 +1459,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
+ * recovering after a network partition or a reboot from a
+ * server that doesn't support a grace period.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ write_seqlock(&sp->so_reclaim_seqlock);
++#else
++ write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
++#endif
+ spin_lock(&sp->so_lock);
+- raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
+ restart:
+ list_for_each_entry(state, &sp->so_states, open_states) {
+ if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
+@@ -1528,14 +1532,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
+ spin_lock(&sp->so_lock);
+ goto restart;
+ }
+- raw_write_seqcount_end(&sp->so_reclaim_seqcount);
+ spin_unlock(&sp->so_lock);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ write_sequnlock(&sp->so_reclaim_seqlock);
++#else
++ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
++#endif
+ return 0;
+ out_err:
+ nfs4_put_open_state(state);
+- spin_lock(&sp->so_lock);
+- raw_write_seqcount_end(&sp->so_reclaim_seqcount);
+- spin_unlock(&sp->so_lock);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ write_sequnlock(&sp->so_reclaim_seqlock);
++#else
++ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
++#endif
+ return status;
+ }
+
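The reader side of the so_reclaim_seqlock conversion keeps the usual seqlock retry loop, as in nfs_delegation_claim_opens() above; condensed:

	unsigned int seq;

	do {
		seq = read_seqbegin(&sp->so_reclaim_seqlock);
		/* ... sample state the reclaim writer may change ... */
	} while (read_seqretry(&sp->so_reclaim_seqlock, seq));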
+diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
+index 191aa577dd1f..58990c8f52e0 100644
+--- a/fs/nfs/unlink.c
++++ b/fs/nfs/unlink.c
+@@ -12,7 +12,7 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/nfs_fs.h>
+ #include <linux/sched.h>
+-#include <linux/wait.h>
++#include <linux/swait.h>
+ #include <linux/namei.h>
+ #include <linux/fsnotify.h>
+
+@@ -51,6 +51,29 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
+ rpc_restart_call_prepare(task);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void nfs_down_anon(struct semaphore *sema)
++{
++ down(sema);
++}
++
++static void nfs_up_anon(struct semaphore *sema)
++{
++ up(sema);
++}
++
++#else
++static void nfs_down_anon(struct rw_semaphore *rwsem)
++{
++ down_read_non_owner(rwsem);
++}
++
++static void nfs_up_anon(struct rw_semaphore *rwsem)
++{
++ up_read_non_owner(rwsem);
++}
++#endif
++
+ /**
+ * nfs_async_unlink_release - Release the sillydelete data.
+ * @task: rpc_task of the sillydelete
+@@ -64,7 +87,7 @@ static void nfs_async_unlink_release(void *calldata)
+ struct dentry *dentry = data->dentry;
+ struct super_block *sb = dentry->d_sb;
+
+- up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
++ nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
+ d_lookup_done(dentry);
+ nfs_free_unlinkdata(data);
+ dput(dentry);
+@@ -117,10 +140,10 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
+ struct inode *dir = d_inode(dentry->d_parent);
+ struct dentry *alias;
+
+- down_read_non_owner(&NFS_I(dir)->rmdir_sem);
++ nfs_down_anon(&NFS_I(dir)->rmdir_sem);
+ alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq);
+ if (IS_ERR(alias)) {
+- up_read_non_owner(&NFS_I(dir)->rmdir_sem);
++ nfs_up_anon(&NFS_I(dir)->rmdir_sem);
+ return 0;
+ }
+ if (!d_in_lookup(alias)) {
+@@ -142,7 +165,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
+ ret = 0;
+ spin_unlock(&alias->d_lock);
+ dput(alias);
+- up_read_non_owner(&NFS_I(dir)->rmdir_sem);
++ nfs_up_anon(&NFS_I(dir)->rmdir_sem);
+ /*
+ * If we'd displaced old cached devname, free it. At that
+ * point dentry is definitely not a root, so we won't need
+@@ -182,7 +205,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
+ goto out_free_name;
+ }
+ data->res.dir_attr = &data->dir_attr;
+- init_waitqueue_head(&data->wq);
++ init_swait_queue_head(&data->wq);
+
+ status = -EBUSY;
+ spin_lock(&dentry->d_lock);
+diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
+index fe251f187ff8..e89da4fb14c2 100644
+--- a/fs/ntfs/aops.c
++++ b/fs/ntfs/aops.c
+@@ -92,13 +92,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
+ ofs = 0;
+ if (file_ofs < init_size)
+ ofs = init_size - file_ofs;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ kaddr = kmap_atomic(page);
+ memset(kaddr + bh_offset(bh) + ofs, 0,
+ bh->b_size - ofs);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ } else {
+ clear_buffer_uptodate(bh);
+@@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
+ "0x%llx.", (unsigned long long)bh->b_blocknr);
+ }
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ flags = bh_uptodate_lock_irqsave(first);
+ clear_buffer_async_read(bh);
+ unlock_buffer(bh);
+ tmp = bh;
+@@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
+ }
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(first, flags);
+ /*
+ * If none of the buffers had errors then we can set the page uptodate,
+ * but we first have to perform the post read mst fixups, if the
+@@ -145,13 +143,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
+ recs = PAGE_SIZE / rec_size;
+ /* Should have been verified before we got here... */
+ BUG_ON(!recs);
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ kaddr = kmap_atomic(page);
+ for (i = 0; i < recs; i++)
+ post_read_mst_fixup((NTFS_RECORD*)(kaddr +
+ i * rec_size), rec_size);
+ kunmap_atomic(kaddr);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ flush_dcache_page(page);
+ if (likely(page_uptodate && !PageError(page)))
+ SetPageUptodate(page);
+@@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
+ unlock_page(page);
+ return;
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
+- return;
++ bh_uptodate_unlock_irqrestore(first, flags);
+ }
+
+ /**
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index ac0df4dde823..ad1a4723ffdd 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1819,7 +1819,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
+
+ child = d_hash_and_lookup(dir, &qname);
+ if (!child) {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ child = d_alloc_parallel(dir, &qname, &wq);
+ if (IS_ERR(child))
+ goto end_instantiate;
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 1b93650dda2f..c553bf3ea541 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -627,7 +627,7 @@ static bool proc_sys_fill_cache(struct file *file,
+
+ child = d_lookup(dir, &qname);
+ if (!child) {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ child = d_alloc_parallel(dir, &qname, &wq);
+ if (IS_ERR(child))
+ return false;
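Every d_alloc_parallel() caller converted in this patch follows the same shape; a condensed sketch (the error handling is illustrative, callers differ):

	DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);	/* simple-waitqueue head on the stack */
	struct dentry *child;

	child = d_alloc_parallel(dir, &qname, &wq);
	if (IS_ERR(child))
		return PTR_ERR(child);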
+diff --git a/fs/timerfd.c b/fs/timerfd.c
+index 9ae4abb4110b..8644b67c48fd 100644
+--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -460,7 +460,10 @@ static int do_timerfd_settime(int ufd, int flags,
+ break;
+ }
+ spin_unlock_irq(&ctx->wqh.lock);
+- cpu_relax();
++ if (isalarm(ctx))
++ hrtimer_wait_for_timer(&ctx->t.alarm.timer);
++ else
++ hrtimer_wait_for_timer(&ctx->t.tmr);
+ }
+
+ /*
+diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
+index 93b61b1f2beb..58270adb46ce 100644
+--- a/include/acpi/platform/aclinux.h
++++ b/include/acpi/platform/aclinux.h
+@@ -131,6 +131,7 @@
+
+ #define acpi_cache_t struct kmem_cache
+ #define acpi_spinlock spinlock_t *
++#define acpi_raw_spinlock raw_spinlock_t *
+ #define acpi_cpu_flags unsigned long
+
+ /* Use native linux version of acpi_os_allocate_zeroed */
+@@ -149,6 +150,20 @@
+ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
+ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
+
++#define acpi_os_create_raw_lock(__handle) \
++({ \
++ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
++ \
++ if (lock) { \
++ *(__handle) = lock; \
++ raw_spin_lock_init(*(__handle)); \
++ } \
++ lock ? AE_OK : AE_NO_MEMORY; \
++ })
++
++#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
++
++
+ /*
+ * OSL interfaces used by debugger/disassembler
+ */
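A hedged usage sketch for the raw-lock OSL additions above, using only the handle type and macros just defined; the surrounding calls are illustrative:

	acpi_raw_spinlock lock;

	if (acpi_os_create_raw_lock(&lock) == AE_OK) {
		raw_spin_lock(lock);	/* stays a true spinning lock on RT */
		/* ... short, non-sleeping critical section ... */
		raw_spin_unlock(lock);
		acpi_os_delete_raw_lock(lock);
	}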
+diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
+index 6f96247226a4..fa53a21263c2 100644
+--- a/include/asm-generic/bug.h
++++ b/include/asm-generic/bug.h
+@@ -215,6 +215,20 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
+ # define WARN_ON_SMP(x) ({0;})
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define BUG_ON_RT(c) BUG_ON(c)
++# define BUG_ON_NONRT(c) do { } while (0)
++# define WARN_ON_RT(condition) WARN_ON(condition)
++# define WARN_ON_NONRT(condition) do { } while (0)
++# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
++#else
++# define BUG_ON_RT(c) do { } while (0)
++# define BUG_ON_NONRT(c) BUG_ON(c)
++# define WARN_ON_RT(condition) do { } while (0)
++# define WARN_ON_NONRT(condition) WARN_ON(condition)
++# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
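How the new conditional assertions read in practice (the conditions are illustrative, not taken from this patch):

	/* IRQs are only guaranteed off here on non-RT kernels: */
	WARN_ON_NONRT(!irqs_disabled());
	/* conversely, a check that should only trip on RT kernels: */
	BUG_ON_RT(in_atomic());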
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index e43bbffb5b7a..c23892264109 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -222,6 +222,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
+
+ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
+ struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
++void __blk_mq_complete_request_remote_work(struct work_struct *work);
+
+ int blk_mq_request_started(struct request *rq);
+ void blk_mq_start_request(struct request *rq);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index e79055c8b577..8583c1af14ad 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -89,6 +89,7 @@ struct request {
+ struct list_head queuelist;
+ union {
+ struct call_single_data csd;
++ struct work_struct work;
+ u64 fifo_time;
+ };
+
+@@ -467,7 +468,7 @@ struct request_queue {
+ struct throtl_data *td;
+ #endif
+ struct rcu_head rcu_head;
+- wait_queue_head_t mq_freeze_wq;
++ struct swait_queue_head mq_freeze_wq;
+ struct percpu_ref q_usage_counter;
+ struct list_head all_q_node;
+
+diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
+index 8fdcb783197d..d07dbeec7bc1 100644
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
+@@ -3,6 +3,39 @@
+
+ #include <linux/preempt.h>
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++extern void __local_bh_disable(void);
++extern void _local_bh_enable(void);
++extern void __local_bh_enable(void);
++
++static inline void local_bh_disable(void)
++{
++ __local_bh_disable();
++}
++
++static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
++{
++ __local_bh_disable();
++}
++
++static inline void local_bh_enable(void)
++{
++ __local_bh_enable();
++}
++
++static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
++{
++ __local_bh_enable();
++}
++
++static inline void local_bh_enable_ip(unsigned long ip)
++{
++ __local_bh_enable();
++}
++
++#else
++
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+ #else
+@@ -30,5 +63,6 @@ static inline void local_bh_enable(void)
+ {
+ __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+ }
++#endif
+
+ #endif /* _LINUX_BH_H */
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index ebbacd14d450..be5e87f6360a 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -75,8 +75,50 @@ struct buffer_head {
+ struct address_space *b_assoc_map; /* mapping this buffer is
+ associated with */
+ atomic_t b_count; /* users using this buffer_head */
++#ifdef CONFIG_PREEMPT_RT_BASE
++ spinlock_t b_uptodate_lock;
++#if IS_ENABLED(CONFIG_JBD2)
++ spinlock_t b_state_lock;
++ spinlock_t b_journal_head_lock;
++#endif
++#endif
+ };
+
++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
++{
++ unsigned long flags;
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++ local_irq_save(flags);
++ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
++#else
++ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
++#endif
++ return flags;
++}
++
++static inline void
++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
++{
++#ifndef CONFIG_PREEMPT_RT_BASE
++ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
++ local_irq_restore(flags);
++#else
++ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
++#endif
++}
++
++static inline void buffer_head_init_locks(struct buffer_head *bh)
++{
++#ifdef CONFIG_PREEMPT_RT_BASE
++ spin_lock_init(&bh->b_uptodate_lock);
++#if IS_ENABLED(CONFIG_JBD2)
++ spin_lock_init(&bh->b_state_lock);
++ spin_lock_init(&bh->b_journal_head_lock);
++#endif
++#endif
++}
++
+ /*
+ * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
+ * and buffer_foo() functions.
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 5b17de62c962..56027cc01a56 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -16,6 +16,7 @@
+ #include <linux/percpu-refcount.h>
+ #include <linux/percpu-rwsem.h>
+ #include <linux/workqueue.h>
++#include <linux/swork.h>
+
+ #ifdef CONFIG_CGROUPS
+
+@@ -137,6 +138,7 @@ struct cgroup_subsys_state {
+ /* percpu_ref killing and RCU release */
+ struct rcu_head rcu_head;
+ struct work_struct destroy_work;
++ struct swork_event destroy_swork;
+ };
+
+ /*
+diff --git a/include/linux/completion.h b/include/linux/completion.h
+index 5d5aaae3af43..3bca1590e29f 100644
+--- a/include/linux/completion.h
++++ b/include/linux/completion.h
+@@ -7,8 +7,7 @@
+ * Atomic wait-for-completion handler data structures.
+ * See kernel/sched/completion.c for details.
+ */
+-
+-#include <linux/wait.h>
++#include <linux/swait.h>
+
+ /*
+ * struct completion - structure used to maintain state for a "completion"
+@@ -24,11 +23,11 @@
+ */
+ struct completion {
+ unsigned int done;
+- wait_queue_head_t wait;
++ struct swait_queue_head wait;
+ };
+
+ #define COMPLETION_INITIALIZER(work) \
+- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
++ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+
+ #define COMPLETION_INITIALIZER_ONSTACK(work) \
+ ({ init_completion(&work); work; })
+@@ -73,7 +72,7 @@ struct completion {
+ static inline void init_completion(struct completion *x)
+ {
+ x->done = 0;
+- init_waitqueue_head(&x->wait);
++ init_swait_queue_head(&x->wait);
+ }
+
+ /**
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 797d9c8e9a1b..6eabd9e8a98b 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -201,6 +201,8 @@ extern void get_online_cpus(void);
+ extern void put_online_cpus(void);
+ extern void cpu_hotplug_disable(void);
+ extern void cpu_hotplug_enable(void);
++extern void pin_current_cpu(void);
++extern void unpin_current_cpu(void);
+ #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
+ #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
+ #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+@@ -218,6 +220,8 @@ static inline void cpu_hotplug_done(void) {}
+ #define put_online_cpus() do { } while (0)
+ #define cpu_hotplug_disable() do { } while (0)
+ #define cpu_hotplug_enable() do { } while (0)
++static inline void pin_current_cpu(void) { }
++static inline void unpin_current_cpu(void) { }
+ #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+ #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+ /* These aren't inline functions due to a GCC bug. */
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 5ff3e9a4fe5f..ed0431599fd7 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -11,6 +11,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/lockref.h>
+ #include <linux/stringhash.h>
++#include <linux/wait.h>
+
+ struct path;
+ struct vfsmount;
+@@ -100,7 +101,7 @@ struct dentry {
+
+ union {
+ struct list_head d_lru; /* LRU list */
+- wait_queue_head_t *d_wait; /* in-lookup ones only */
++ struct swait_queue_head *d_wait; /* in-lookup ones only */
+ };
+ struct list_head d_child; /* child of parent list */
+ struct list_head d_subdirs; /* our children */
+@@ -230,7 +231,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
+ extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
+ extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+ extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
+- wait_queue_head_t *);
++ struct swait_queue_head *);
+ extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
+ extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+ extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
+diff --git a/include/linux/delay.h b/include/linux/delay.h
+index a6ecb34cf547..37caab306336 100644
+--- a/include/linux/delay.h
++++ b/include/linux/delay.h
+@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int seconds)
+ msleep(seconds * 1000);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++extern void cpu_chill(void);
++#else
++# define cpu_chill() cpu_relax()
++#endif
++
+ #endif /* defined(_LINUX_DELAY_H) */
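cpu_chill() replaces cpu_relax() in the trylock retry loops converted throughout this patch (autofs4, dcache, namespace): on RT it sleeps briefly instead of busy-waiting, so a high-priority task spinning here cannot starve the lock holder. The canonical shape, following d_delete() above:

again:
	spin_lock(&dentry->d_lock);
	if (!spin_trylock(&inode->i_lock)) {
		spin_unlock(&dentry->d_lock);
		cpu_chill();		/* RT: short sleep; !RT: cpu_relax() */
		goto again;
	}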
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 7d565afe35d2..8e31b4d245d2 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -714,6 +714,7 @@ static inline void __ftrace_enabled_restore(int enabled)
+ #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
+ #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+
++#ifdef CONFIG_USING_GET_LOCK_PARENT_IP
+ static inline unsigned long get_lock_parent_ip(void)
+ {
+ unsigned long addr = CALLER_ADDR0;
+@@ -725,6 +726,7 @@ static inline unsigned long get_lock_parent_ip(void)
+ return addr;
+ return CALLER_ADDR2;
+ }
++#endif
+
+ #ifdef CONFIG_IRQSOFF_TRACER
+ extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index bb3f3297062a..a117a33ef72c 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -7,6 +7,7 @@
+ #include <linux/mm.h>
+ #include <linux/uaccess.h>
+ #include <linux/hardirq.h>
++#include <linux/sched.h>
+
+ #include <asm/cacheflush.h>
+
+@@ -65,7 +66,7 @@ static inline void kunmap(struct page *page)
+
+ static inline void *kmap_atomic(struct page *page)
+ {
+- preempt_disable();
++ preempt_disable_nort();
+ pagefault_disable();
+ return page_address(page);
+ }
+@@ -74,7 +75,7 @@ static inline void *kmap_atomic(struct page *page)
+ static inline void __kunmap_atomic(void *addr)
+ {
+ pagefault_enable();
+- preempt_enable();
++ preempt_enable_nort();
+ }
+
+ #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
+@@ -86,32 +87,51 @@ static inline void __kunmap_atomic(void *addr)
+
+ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ DECLARE_PER_CPU(int, __kmap_atomic_idx);
++#endif
+
+ static inline int kmap_atomic_idx_push(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+
+-#ifdef CONFIG_DEBUG_HIGHMEM
++# ifdef CONFIG_DEBUG_HIGHMEM
+ WARN_ON_ONCE(in_irq() && !irqs_disabled());
+ BUG_ON(idx >= KM_TYPE_NR);
+-#endif
++# endif
+ return idx;
++#else
++ current->kmap_idx++;
++ BUG_ON(current->kmap_idx > KM_TYPE_NR);
++ return current->kmap_idx - 1;
++#endif
+ }
+
+ static inline int kmap_atomic_idx(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ return __this_cpu_read(__kmap_atomic_idx) - 1;
++#else
++ return current->kmap_idx - 1;
++#endif
+ }
+
+ static inline void kmap_atomic_idx_pop(void)
+ {
+-#ifdef CONFIG_DEBUG_HIGHMEM
++#ifndef CONFIG_PREEMPT_RT_FULL
++# ifdef CONFIG_DEBUG_HIGHMEM
+ int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+
+ BUG_ON(idx < 0);
+-#else
++# else
+ __this_cpu_dec(__kmap_atomic_idx);
++# endif
++#else
++ current->kmap_idx--;
++# ifdef CONFIG_DEBUG_HIGHMEM
++ BUG_ON(current->kmap_idx < 0);
++# endif
+ #endif
+ }
+
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index 5e00f80b1535..65d0671f20b4 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -87,6 +87,9 @@ enum hrtimer_restart {
+ * @function: timer expiry callback function
+ * @base: pointer to the timer base (per cpu and per clock)
+ * @state: state information (See bit values above)
++ * @cb_entry: list entry to defer timers from hardirq context
++ * @irqsafe: timer can run in hardirq context
++ * @praecox: timer expiry time if expired at the time of programming
+ * @is_rel: Set if the timer was armed relative
+ * @start_pid: timer statistics field to store the pid of the task which
+ * started the timer
+@@ -103,6 +106,11 @@ struct hrtimer {
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ u8 state;
++ struct list_head cb_entry;
++ int irqsafe;
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ ktime_t praecox;
++#endif
+ u8 is_rel;
+ #ifdef CONFIG_TIMER_STATS
+ int start_pid;
+@@ -123,11 +131,7 @@ struct hrtimer_sleeper {
+ struct task_struct *task;
+ };
+
+-#ifdef CONFIG_64BIT
+ # define HRTIMER_CLOCK_BASE_ALIGN 64
+-#else
+-# define HRTIMER_CLOCK_BASE_ALIGN 32
+-#endif
+
+ /**
+ * struct hrtimer_clock_base - the timer base for a specific clock
+@@ -136,6 +140,7 @@ struct hrtimer_sleeper {
+ * timer to a base on another cpu.
+ * @clockid: clock id for per_cpu support
+ * @active: red black tree root node for the active timers
++ * @expired: list head for deferred timers.
+ * @get_time: function to retrieve the current time of the clock
+ * @offset: offset of this clock to the monotonic base
+ */
+@@ -144,6 +149,7 @@ struct hrtimer_clock_base {
+ int index;
+ clockid_t clockid;
+ struct timerqueue_head active;
++ struct list_head expired;
+ ktime_t (*get_time)(void);
+ ktime_t offset;
+ } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
+@@ -187,6 +193,7 @@ struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+ seqcount_t seq;
+ struct hrtimer *running;
++ struct hrtimer *running_soft;
+ unsigned int cpu;
+ unsigned int active_bases;
+ unsigned int clock_was_set_seq;
+@@ -203,6 +210,9 @@ struct hrtimer_cpu_base {
+ unsigned int nr_hangs;
+ unsigned int max_hang_time;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ wait_queue_head_t wait;
++#endif
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ } ____cacheline_aligned;
+
+@@ -412,6 +422,13 @@ static inline void hrtimer_restart(struct hrtimer *timer)
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+ }
+
++/* Softirq preemption could deadlock timer removal */
++#ifdef CONFIG_PREEMPT_RT_BASE
++ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
++#else
++# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
++#endif
++
+ /* Query timers: */
+ extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+
+@@ -436,7 +453,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
+ * Helper function to check, whether the timer is running the callback
+ * function
+ */
+-static inline int hrtimer_callback_running(struct hrtimer *timer)
++static inline int hrtimer_callback_running(const struct hrtimer *timer)
+ {
+ return timer->base->cpu_base->running == timer;
+ }
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index 083d61e92706..5899796f50cb 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
+ * Each idr_preload() should be matched with an invocation of this
+ * function. See idr_preload() for details.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++void idr_preload_end(void);
++#else
+ static inline void idr_preload_end(void)
+ {
+ preempt_enable();
+ }
++#endif
+
+ /**
+ * idr_find - return pointer for given id
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index f8834f820ec2..a688d5e19578 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -148,6 +148,12 @@ extern struct task_group root_task_group;
+ # define INIT_PERF_EVENTS(tsk)
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define INIT_TIMER_LIST .posix_timer_list = NULL,
++#else
++# define INIT_TIMER_LIST
++#endif
++
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+ # define INIT_VTIME(tsk) \
+ .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \
+@@ -239,6 +245,7 @@ extern struct task_group root_task_group;
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
++ INIT_TIMER_LIST \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index b6683f0ffc9f..c0a351daf736 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -14,6 +14,7 @@
+ #include <linux/hrtimer.h>
+ #include <linux/kref.h>
+ #include <linux/workqueue.h>
++#include <linux/swork.h>
+
+ #include <linux/atomic.h>
+ #include <asm/ptrace.h>
+@@ -61,6 +62,7 @@
+ * interrupt handler after suspending interrupts. For system
+ * wakeup devices users need to implement wakeup detection in
+ * their interrupt handlers.
++ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
+ */
+ #define IRQF_SHARED 0x00000080
+ #define IRQF_PROBE_SHARED 0x00000100
+@@ -74,6 +76,7 @@
+ #define IRQF_NO_THREAD 0x00010000
+ #define IRQF_EARLY_RESUME 0x00020000
+ #define IRQF_COND_SUSPEND 0x00040000
++#define IRQF_NO_SOFTIRQ_CALL 0x00080000
+
+ #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
+
+@@ -196,7 +199,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
+ #ifdef CONFIG_LOCKDEP
+ # define local_irq_enable_in_hardirq() do { } while (0)
+ #else
+-# define local_irq_enable_in_hardirq() local_irq_enable()
++# define local_irq_enable_in_hardirq() local_irq_enable_nort()
+ #endif
+
+ extern void disable_irq_nosync(unsigned int irq);
+@@ -216,6 +219,7 @@ extern void resume_device_irqs(void);
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq: Interrupt to which notification applies
+ * @kref: Reference count, for internal use
++ * @swork: Swork item, for internal use
+ * @work: Work item, for internal use
+ * @notify: Function to be called on change. This will be
+ * called in process context.
+@@ -227,7 +231,11 @@ extern void resume_device_irqs(void);
+ struct irq_affinity_notify {
+ unsigned int irq;
+ struct kref kref;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct swork_event swork;
++#else
+ struct work_struct work;
++#endif
+ void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+ void (*release)(struct kref *ref);
+ };
+@@ -398,9 +406,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+ bool state);
+
+ #ifdef CONFIG_IRQ_FORCED_THREADING
++# ifndef CONFIG_PREEMPT_RT_BASE
+ extern bool force_irqthreads;
++# else
++# define force_irqthreads (true)
++# endif
+ #else
+-#define force_irqthreads (0)
++#define force_irqthreads (false)
+ #endif
+
+ #ifndef __ARCH_SET_SOFTIRQ_PENDING
+@@ -457,9 +469,10 @@ struct softirq_action
+ void (*action)(struct softirq_action *);
+ };
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
+-
++static inline void thread_do_softirq(void) { do_softirq(); }
+ #ifdef __ARCH_HAS_DO_SOFTIRQ
+ void do_softirq_own_stack(void);
+ #else
+@@ -468,13 +481,25 @@ static inline void do_softirq_own_stack(void)
+ __do_softirq();
+ }
+ #endif
++#else
++extern void thread_do_softirq(void);
++#endif
+
+ extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+ extern void softirq_init(void);
+ extern void __raise_softirq_irqoff(unsigned int nr);
++#ifdef CONFIG_PREEMPT_RT_FULL
++extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
++#else
++static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
++{
++ __raise_softirq_irqoff(nr);
++}
++#endif
+
+ extern void raise_softirq_irqoff(unsigned int nr);
+ extern void raise_softirq(unsigned int nr);
++extern void softirq_check_pending_idle(void);
+
+ DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+@@ -496,8 +521,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
+ to be executed on some cpu at least once after this.
+ * If the tasklet is already scheduled, but its execution is still not
+ started, it will be executed only once.
+- * If this tasklet is already running on another CPU (or schedule is called
+- from tasklet itself), it is rescheduled for later.
++ * If this tasklet is already running on another CPU, it is rescheduled
++ for later.
++ * Schedule must not be called from the tasklet itself (a lockup occurs)
+ * Tasklet is strictly serialized wrt itself, but not
+ wrt another tasklets. If client needs some intertask synchronization,
+ he makes it with spinlocks.
+@@ -522,27 +548,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+ enum
+ {
+ TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
+- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
++ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
++ TASKLET_STATE_PENDING /* Tasklet is pending */
+ };
+
+-#ifdef CONFIG_SMP
++#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
++#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
++#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
++
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ static inline int tasklet_trylock(struct tasklet_struct *t)
+ {
+ return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
++static inline int tasklet_tryunlock(struct tasklet_struct *t)
++{
++ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
++}
++
+ static inline void tasklet_unlock(struct tasklet_struct *t)
+ {
+ smp_mb__before_atomic();
+ clear_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
+-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+-{
+- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+-}
++extern void tasklet_unlock_wait(struct tasklet_struct *t);
++
+ #else
+ #define tasklet_trylock(t) 1
++#define tasklet_tryunlock(t) 1
+ #define tasklet_unlock_wait(t) do { } while (0)
+ #define tasklet_unlock(t) do { } while (0)
+ #endif
+@@ -591,12 +626,7 @@ static inline void tasklet_disable(struct tasklet_struct *t)
+ smp_mb();
+ }
+
+-static inline void tasklet_enable(struct tasklet_struct *t)
+-{
+- smp_mb__before_atomic();
+- atomic_dec(&t->count);
+-}
+-
++extern void tasklet_enable(struct tasklet_struct *t);
+ extern void tasklet_kill(struct tasklet_struct *t);
+ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
+ extern void tasklet_init(struct tasklet_struct *t,
+@@ -627,6 +657,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
+ tasklet_kill(&ttimer->tasklet);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++extern void softirq_early_init(void);
++#else
++static inline void softirq_early_init(void) { }
++#endif
++
+ /*
+ * Autoprobing for irqs:
+ *
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index 0ac26c892fe2..ede85f106aef 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -72,6 +72,7 @@ enum irqchip_irq_state;
+ * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
+ * it from the spurious interrupt detection
+ * mechanism and from core side polling.
++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
+ * IRQ_DISABLE_UNLAZY - Disable lazy irq disable
+ */
+ enum {
+@@ -99,13 +100,14 @@ enum {
+ IRQ_PER_CPU_DEVID = (1 << 17),
+ IRQ_IS_POLLED = (1 << 18),
+ IRQ_DISABLE_UNLAZY = (1 << 19),
++ IRQ_NO_SOFTIRQ_CALL = (1 << 20),
+ };
+
+ #define IRQF_MODIFY_MASK \
+ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
+- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
++ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
+
+ #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
+
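Annotation: because IRQ_NO_SOFTIRQ_CALL is added to IRQF_MODIFY_MASK, it can be toggled with the stock status helpers. A hedged sketch; the wrapper name and call site are illustrative:

#include <linux/irq.h>

/* Sketch: keep softirq processing out of this interrupt's threaded
 * handler path on RT. */
static void mark_no_softirq_call(unsigned int irq)
{
        irq_set_status_flags(irq, IRQ_NO_SOFTIRQ_CALL);
}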
+diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
+index 47b9ebd4a74f..2543aab05daa 100644
+--- a/include/linux/irq_work.h
++++ b/include/linux/irq_work.h
+@@ -16,6 +16,7 @@
+ #define IRQ_WORK_BUSY 2UL
+ #define IRQ_WORK_FLAGS 3UL
+ #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
++#define IRQ_WORK_HARD_IRQ 8UL /* Run in hard IRQ context, even on RT */
+
+ struct irq_work {
+ unsigned long flags;
+@@ -51,4 +52,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
+ static inline void irq_work_run(void) { }
+ #endif
+
++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
++void irq_work_tick_soft(void);
++#else
++static inline void irq_work_tick_soft(void) { }
++#endif
++
+ #endif /* _LINUX_IRQ_WORK_H */
+diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
+index b51beebf9804..219d9824f762 100644
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -64,6 +64,7 @@ struct irq_desc {
+ unsigned int irqs_unhandled;
+ atomic_t threads_handled;
+ int threads_handled_last;
++ u64 random_ip;
+ raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
+ const struct cpumask *percpu_affinity;
+diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
+index 5dd1272d1ab2..9b77034f7c5e 100644
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -25,8 +25,6 @@
+ # define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
+ # define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
+ # define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
+-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
+ # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
+ #else
+ # define trace_hardirqs_on() do { } while (0)
+@@ -39,9 +37,15 @@
+ # define trace_softirqs_enabled(p) 0
+ # define trace_hardirq_enter() do { } while (0)
+ # define trace_hardirq_exit() do { } while (0)
++# define INIT_TRACE_IRQFLAGS
++#endif
++
++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
++# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
++# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
++#else
+ # define lockdep_softirq_enter() do { } while (0)
+ # define lockdep_softirq_exit() do { } while (0)
+-# define INIT_TRACE_IRQFLAGS
+ #endif
+
+ #if defined(CONFIG_IRQSOFF_TRACER) || \
+@@ -148,4 +152,23 @@
+
+ #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+
++/*
++ * local_irq* variants depending on RT/!RT
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define local_irq_disable_nort() do { } while (0)
++# define local_irq_enable_nort() do { } while (0)
++# define local_irq_save_nort(flags) local_save_flags(flags)
++# define local_irq_restore_nort(flags) (void)(flags)
++# define local_irq_disable_rt() local_irq_disable()
++# define local_irq_enable_rt() local_irq_enable()
++#else
++# define local_irq_disable_nort() local_irq_disable()
++# define local_irq_enable_nort() local_irq_enable()
++# define local_irq_save_nort(flags) local_irq_save(flags)
++# define local_irq_restore_nort(flags) local_irq_restore(flags)
++# define local_irq_disable_rt() do { } while (0)
++# define local_irq_enable_rt() do { } while (0)
++#endif
++
+ #endif
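Annotation: the *_nort variants let code shared between RT and !RT keep a single source. On !RT they really disable interrupts; on RT they only save flags, and protection must come from sleeping locks. A minimal sketch, with the struct and function names invented for illustration:

#include <linux/irqflags.h>

struct stats { unsigned long count; };

/* Sketch: interrupts stay off on !RT; on RT the section remains
 * preemptible and the data must be covered by a sleeping lock. */
static void stats_inc(struct stats *s)
{
        unsigned long flags;

        local_irq_save_nort(flags);
        s->count++;
        local_irq_restore_nort(flags);
}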
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index dfaa1f4dcb0c..d57dd06544a1 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
+
+ static inline void jbd_lock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(BH_State, &bh->b_state);
++#else
++ spin_lock(&bh->b_state_lock);
++#endif
+ }
+
+ static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ return bit_spin_trylock(BH_State, &bh->b_state);
++#else
++ return spin_trylock(&bh->b_state_lock);
++#endif
+ }
+
+ static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ return bit_spin_is_locked(BH_State, &bh->b_state);
++#else
++ return spin_is_locked(&bh->b_state_lock);
++#endif
+ }
+
+ static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_State, &bh->b_state);
++#else
++ spin_unlock(&bh->b_state_lock);
++#endif
+ }
+
+ static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(BH_JournalHead, &bh->b_state);
++#else
++ spin_lock(&bh->b_journal_head_lock);
++#endif
+ }
+
+ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_JournalHead, &bh->b_state);
++#else
++ spin_unlock(&bh->b_journal_head_lock);
++#endif
+ }
+
+ #define J_ASSERT(assert) BUG_ON(!(assert))
+diff --git a/include/linux/kdb.h b/include/linux/kdb.h
+index 410decacff8f..0861bebfc188 100644
+--- a/include/linux/kdb.h
++++ b/include/linux/kdb.h
+@@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
+ extern __printf(1, 2) int kdb_printf(const char *, ...);
+ typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
+
++#define in_kdb_printk() (kdb_trap_printk)
+ extern void kdb_init(int level);
+
+ /* Access to kdb specific polling devices */
+@@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
+ extern int kdb_unregister(char *);
+ #else /* ! CONFIG_KGDB_KDB */
+ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
++#define in_kdb_printk() (0)
+ static inline void kdb_init(int level) {}
+ static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
+ char *help, short minlen) { return 0; }
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index d96a6118d26a..37de2ce2d290 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -194,6 +194,9 @@ extern int _cond_resched(void);
+ */
+ # define might_sleep() \
+ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
++
++# define might_sleep_no_state_check() \
++ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+ # define sched_annotate_sleep() (current->task_state_change = 0)
+ #else
+ static inline void ___might_sleep(const char *file, int line,
+@@ -201,6 +204,7 @@ extern int _cond_resched(void);
+ static inline void __might_sleep(const char *file, int line,
+ int preempt_offset) { }
+ # define might_sleep() do { might_resched(); } while (0)
++# define might_sleep_no_state_check() do { might_resched(); } while (0)
+ # define sched_annotate_sleep() do { } while (0)
+ #endif
+
+@@ -491,6 +495,7 @@ extern enum system_states {
+ SYSTEM_HALT,
+ SYSTEM_POWER_OFF,
+ SYSTEM_RESTART,
++ SYSTEM_SUSPEND,
+ } system_state;
+
+ #define TAINT_PROPRIETARY_MODULE 0
+diff --git a/include/linux/lglock.h b/include/linux/lglock.h
+index c92ebd100d9b..6f035f635d0e 100644
+--- a/include/linux/lglock.h
++++ b/include/linux/lglock.h
+@@ -34,13 +34,30 @@
+ #endif
+
+ struct lglock {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ struct rt_mutex __percpu *lock;
++#else
+ arch_spinlock_t __percpu *lock;
++#endif
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lock_key;
+ struct lockdep_map lock_dep_map;
+ #endif
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define DEFINE_LGLOCK(name) \
++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
++ = __RT_MUTEX_INITIALIZER( name ## _lock); \
++ struct lglock name = { .lock = &name ## _lock }
++
++# define DEFINE_STATIC_LGLOCK(name) \
++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
++ = __RT_MUTEX_INITIALIZER( name ## _lock); \
++ static struct lglock name = { .lock = &name ## _lock }
++
++#else
++
+ #define DEFINE_LGLOCK(name) \
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+@@ -50,6 +67,7 @@ struct lglock {
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+ static struct lglock name = { .lock = &name ## _lock }
++#endif
+
+ void lg_lock_init(struct lglock *lg, char *name);
+
+@@ -64,6 +82,12 @@ void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
+ void lg_global_lock(struct lglock *lg);
+ void lg_global_unlock(struct lglock *lg);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++#define lg_global_trylock_relax(name) lg_global_lock(name)
++#else
++void lg_global_trylock_relax(struct lglock *lg);
++#endif
++
+ #else
+ /* When !CONFIG_SMP, map lglock to spinlock */
+ #define lglock spinlock
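Annotation: callers of the lglock API are untouched by this conversion; only the per-CPU lock type behind DEFINE_LGLOCK changes. A sketch under that assumption, with files_lglock as an invented name:

#include <linux/lglock.h>

static DEFINE_STATIC_LGLOCK(files_lglock);

/* Sketch: same caller code whether the backing lock is an
 * arch_spinlock_t (!RT) or an rt_mutex (RT). */
static void touch_this_cpu(void)
{
        lg_local_lock(&files_lglock);
        /* ... operate on this CPU's slice of the data ... */
        lg_local_unlock(&files_lglock);
}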
+diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
+index cb483305e1f5..4e5062316bb6 100644
+--- a/include/linux/list_bl.h
++++ b/include/linux/list_bl.h
+@@ -2,6 +2,7 @@
+ #define _LINUX_LIST_BL_H
+
+ #include <linux/list.h>
++#include <linux/spinlock.h>
+ #include <linux/bit_spinlock.h>
+
+ /*
+@@ -32,13 +33,24 @@
+
+ struct hlist_bl_head {
+ struct hlist_bl_node *first;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ raw_spinlock_t lock;
++#endif
+ };
+
+ struct hlist_bl_node {
+ struct hlist_bl_node *next, **pprev;
+ };
+-#define INIT_HLIST_BL_HEAD(ptr) \
+- ((ptr)->first = NULL)
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++#define INIT_HLIST_BL_HEAD(h) \
++do { \
++ (h)->first = NULL; \
++ raw_spin_lock_init(&(h)->lock); \
++} while (0)
++#else
++#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL
++#endif
+
+ static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
+ {
+@@ -118,12 +130,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
+
+ static inline void hlist_bl_lock(struct hlist_bl_head *b)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(0, (unsigned long *)b);
++#else
++ raw_spin_lock(&b->lock);
++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
++ __set_bit(0, (unsigned long *)b);
++#endif
++#endif
+ }
+
+ static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ __bit_spin_unlock(0, (unsigned long *)b);
++#else
++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
++ __clear_bit(0, (unsigned long *)b);
++#endif
++ raw_spin_unlock(&b->lock);
++#endif
+ }
+
+ static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
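Annotation: on RT the list head grows a raw spinlock and INIT_HLIST_BL_HEAD() initializes it, so users keep one init/lock/unlock pairing on both configurations. A minimal sketch; bucket, bucket_setup and add_node are invented names:

#include <linux/list_bl.h>

static struct hlist_bl_head bucket;

static void bucket_setup(void)
{
        INIT_HLIST_BL_HEAD(&bucket);
}

/* Sketch: identical source for the bit-spinlock (!RT) and the
 * raw-spinlock (RT) variants. */
static void add_node(struct hlist_bl_node *n)
{
        hlist_bl_lock(&bucket);
        hlist_bl_add_head(n, &bucket);
        hlist_bl_unlock(&bucket);
}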
+diff --git a/include/linux/locallock.h b/include/linux/locallock.h
+new file mode 100644
+index 000000000000..845c77f1a5ca
+--- /dev/null
++++ b/include/linux/locallock.h
+@@ -0,0 +1,278 @@
++#ifndef _LINUX_LOCALLOCK_H
++#define _LINUX_LOCALLOCK_H
++
++#include <linux/percpu.h>
++#include <linux/spinlock.h>
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define LL_WARN(cond) WARN_ON(cond)
++#else
++# define LL_WARN(cond) do { } while (0)
++#endif
++
++/*
++ * per cpu lock based substitute for local_irq_*()
++ */
++struct local_irq_lock {
++ spinlock_t lock;
++ struct task_struct *owner;
++ int nestcnt;
++ unsigned long flags;
++};
++
++#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
++ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
++ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
++
++#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
++ DECLARE_PER_CPU(struct local_irq_lock, lvar)
++
++#define local_irq_lock_init(lvar) \
++ do { \
++ int __cpu; \
++ for_each_possible_cpu(__cpu) \
++ spin_lock_init(&per_cpu(lvar, __cpu).lock); \
++ } while (0)
++
++/*
++ * spin_lock|trylock|unlock_local flavours that do not disable migration,
++ * used by __local_lock|trylock|unlock where get_local_var/put_local_var
++ * already take care of migrate_disable/enable;
++ * without CONFIG_PREEMPT_RT_FULL they map to the normal spin_* calls.
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define spin_lock_local(lock) rt_spin_lock__no_mg(lock)
++# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock)
++# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock)
++#else
++# define spin_lock_local(lock) spin_lock(lock)
++# define spin_trylock_local(lock) spin_trylock(lock)
++# define spin_unlock_local(lock) spin_unlock(lock)
++#endif
++
++static inline void __local_lock(struct local_irq_lock *lv)
++{
++ if (lv->owner != current) {
++ spin_lock_local(&lv->lock);
++ LL_WARN(lv->owner);
++ LL_WARN(lv->nestcnt);
++ lv->owner = current;
++ }
++ lv->nestcnt++;
++}
++
++#define local_lock(lvar) \
++ do { __local_lock(&get_local_var(lvar)); } while (0)
++
++#define local_lock_on(lvar, cpu) \
++ do { __local_lock(&per_cpu(lvar, cpu)); } while (0)
++
++static inline int __local_trylock(struct local_irq_lock *lv)
++{
++ if (lv->owner != current && spin_trylock_local(&lv->lock)) {
++ LL_WARN(lv->owner);
++ LL_WARN(lv->nestcnt);
++ lv->owner = current;
++ lv->nestcnt = 1;
++ return 1;
++ }
++ return 0;
++}
++
++#define local_trylock(lvar) \
++ ({ \
++ int __locked; \
++ __locked = __local_trylock(&get_local_var(lvar)); \
++ if (!__locked) \
++ put_local_var(lvar); \
++ __locked; \
++ })
++
++static inline void __local_unlock(struct local_irq_lock *lv)
++{
++ LL_WARN(lv->nestcnt == 0);
++ LL_WARN(lv->owner != current);
++ if (--lv->nestcnt)
++ return;
++
++ lv->owner = NULL;
++ spin_unlock_local(&lv->lock);
++}
++
++#define local_unlock(lvar) \
++ do { \
++ __local_unlock(this_cpu_ptr(&lvar)); \
++ put_local_var(lvar); \
++ } while (0)
++
++#define local_unlock_on(lvar, cpu) \
++ do { __local_unlock(&per_cpu(lvar, cpu)); } while (0)
++
++static inline void __local_lock_irq(struct local_irq_lock *lv)
++{
++ spin_lock_irqsave(&lv->lock, lv->flags);
++ LL_WARN(lv->owner);
++ LL_WARN(lv->nestcnt);
++ lv->owner = current;
++ lv->nestcnt = 1;
++}
++
++#define local_lock_irq(lvar) \
++ do { __local_lock_irq(&get_local_var(lvar)); } while (0)
++
++#define local_lock_irq_on(lvar, cpu) \
++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
++
++static inline void __local_unlock_irq(struct local_irq_lock *lv)
++{
++ LL_WARN(!lv->nestcnt);
++ LL_WARN(lv->owner != current);
++ lv->owner = NULL;
++ lv->nestcnt = 0;
++ spin_unlock_irq(&lv->lock);
++}
++
++#define local_unlock_irq(lvar) \
++ do { \
++ __local_unlock_irq(this_cpu_ptr(&lvar)); \
++ put_local_var(lvar); \
++ } while (0)
++
++#define local_unlock_irq_on(lvar, cpu) \
++ do { \
++ __local_unlock_irq(&per_cpu(lvar, cpu)); \
++ } while (0)
++
++static inline int __local_lock_irqsave(struct local_irq_lock *lv)
++{
++ if (lv->owner != current) {
++ __local_lock_irq(lv);
++ return 0;
++ } else {
++ lv->nestcnt++;
++ return 1;
++ }
++}
++
++#define local_lock_irqsave(lvar, _flags) \
++ do { \
++ if (__local_lock_irqsave(&get_local_var(lvar))) \
++ put_local_var(lvar); \
++ _flags = __this_cpu_read(lvar.flags); \
++ } while (0)
++
++#define local_lock_irqsave_on(lvar, _flags, cpu) \
++ do { \
++ __local_lock_irqsave(&per_cpu(lvar, cpu)); \
++ _flags = per_cpu(lvar, cpu).flags; \
++ } while (0)
++
++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
++ unsigned long flags)
++{
++ LL_WARN(!lv->nestcnt);
++ LL_WARN(lv->owner != current);
++ if (--lv->nestcnt)
++ return 0;
++
++ lv->owner = NULL;
++ spin_unlock_irqrestore(&lv->lock, lv->flags);
++ return 1;
++}
++
++#define local_unlock_irqrestore(lvar, flags) \
++ do { \
++ if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
++ put_local_var(lvar); \
++ } while (0)
++
++#define local_unlock_irqrestore_on(lvar, flags, cpu) \
++ do { \
++ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
++ } while (0)
++
++#define local_spin_trylock_irq(lvar, lock) \
++ ({ \
++ int __locked; \
++ local_lock_irq(lvar); \
++ __locked = spin_trylock(lock); \
++ if (!__locked) \
++ local_unlock_irq(lvar); \
++ __locked; \
++ })
++
++#define local_spin_lock_irq(lvar, lock) \
++ do { \
++ local_lock_irq(lvar); \
++ spin_lock(lock); \
++ } while (0)
++
++#define local_spin_unlock_irq(lvar, lock) \
++ do { \
++ spin_unlock(lock); \
++ local_unlock_irq(lvar); \
++ } while (0)
++
++#define local_spin_lock_irqsave(lvar, lock, flags) \
++ do { \
++ local_lock_irqsave(lvar, flags); \
++ spin_lock(lock); \
++ } while (0)
++
++#define local_spin_unlock_irqrestore(lvar, lock, flags) \
++ do { \
++ spin_unlock(lock); \
++ local_unlock_irqrestore(lvar, flags); \
++ } while (0)
++
++#define get_locked_var(lvar, var) \
++ (*({ \
++ local_lock(lvar); \
++ this_cpu_ptr(&var); \
++ }))
++
++#define put_locked_var(lvar, var) local_unlock(lvar);
++
++#define local_lock_cpu(lvar) \
++ ({ \
++ local_lock(lvar); \
++ smp_processor_id(); \
++ })
++
++#define local_unlock_cpu(lvar) local_unlock(lvar)
++
++#else /* PREEMPT_RT_BASE */
++
++#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
++#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar
++
++static inline void local_irq_lock_init(int lvar) { }
++
++#define local_lock(lvar) preempt_disable()
++#define local_unlock(lvar) preempt_enable()
++#define local_lock_irq(lvar) local_irq_disable()
++#define local_lock_irq_on(lvar, cpu) local_irq_disable()
++#define local_unlock_irq(lvar) local_irq_enable()
++#define local_unlock_irq_on(lvar, cpu) local_irq_enable()
++#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
++#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
++
++#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
++#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
++#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
++#define local_spin_lock_irqsave(lvar, lock, flags) \
++ spin_lock_irqsave(lock, flags)
++#define local_spin_unlock_irqrestore(lvar, lock, flags) \
++ spin_unlock_irqrestore(lock, flags)
++
++#define get_locked_var(lvar, var) get_cpu_var(var)
++#define put_locked_var(lvar, var) put_cpu_var(var)
++
++#define local_lock_cpu(lvar) get_cpu()
++#define local_unlock_cpu(lvar) put_cpu()
++
++#endif
++
++#endif
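Annotation: the whole locallock.h header boils down to one usage pattern: define a local lock next to the per-CPU data it guards, then bracket accesses with local_lock_irqsave()/local_unlock_irqrestore(). On !RT that compiles to local_irq_save(); on RT it becomes a per-CPU sleeping spinlock plus migrate_disable(). A sketch with invented names:

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, hit_count);
static DEFINE_LOCAL_IRQ_LOCK(hit_lock);

/* Sketch: per-CPU counter update that stays preemptible on RT. */
static void count_hit(void)
{
        unsigned long flags;

        local_lock_irqsave(hit_lock, flags);
        __this_cpu_inc(hit_count);
        local_unlock_irqrestore(hit_lock, flags);
}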
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 903200f4ec41..df670d441fc9 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -11,6 +11,7 @@
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+ #include <linux/uprobes.h>
++#include <linux/rcupdate.h>
+ #include <linux/page-flags-layout.h>
+ #include <linux/workqueue.h>
+ #include <asm/page.h>
+@@ -508,6 +509,9 @@ struct mm_struct {
+ bool tlb_flush_pending;
+ #endif
+ struct uprobes_state uprobes_state;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head delayed_drop;
++#endif
+ #ifdef CONFIG_X86_INTEL_MPX
+ /* address of the bounds directory */
+ void __user *bd_addr;
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 2cb7531e7d7a..b3fdfc820216 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -19,6 +19,17 @@
+ #include <asm/processor.h>
+ #include <linux/osq_lock.h>
+
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
++ , .dep_map = { .name = #lockname }
++#else
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
++#endif
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/mutex_rt.h>
++#else
++
+ /*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+@@ -99,13 +110,6 @@ do { \
+ static inline void mutex_destroy(struct mutex *lock) {}
+ #endif
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+- , .dep_map = { .name = #lockname }
+-#else
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+-#endif
+-
+ #define __MUTEX_INITIALIZER(lockname) \
+ { .count = ATOMIC_INIT(1) \
+ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+@@ -173,6 +177,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
+ extern int mutex_trylock(struct mutex *lock);
+ extern void mutex_unlock(struct mutex *lock);
+
++#endif /* !PREEMPT_RT_FULL */
++
+ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
+ #endif /* __LINUX_MUTEX_H */
+diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
+new file mode 100644
+index 000000000000..c38a44b14da5
+--- /dev/null
++++ b/include/linux/mutex_rt.h
+@@ -0,0 +1,84 @@
++#ifndef __LINUX_MUTEX_RT_H
++#define __LINUX_MUTEX_RT_H
++
++#ifndef __LINUX_MUTEX_H
++#error "Please include mutex.h"
++#endif
++
++#include <linux/rtmutex.h>
++
++/* FIXME: Just for __lockfunc */
++#include <linux/spinlock.h>
++
++struct mutex {
++ struct rt_mutex lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __MUTEX_INITIALIZER(mutexname) \
++ { \
++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
++ }
++
++#define DEFINE_MUTEX(mutexname) \
++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
++
++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
++extern void __lockfunc _mutex_lock(struct mutex *lock);
++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
++extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_trylock(struct mutex *lock);
++extern void __lockfunc _mutex_unlock(struct mutex *lock);
++
++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
++#define mutex_lock(l) _mutex_lock(l)
++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
++#define mutex_lock_killable(l) _mutex_lock_killable(l)
++#define mutex_trylock(l) _mutex_trylock(l)
++#define mutex_unlock(l) _mutex_unlock(l)
++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible_nested(l, s)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable_nested(l, s)
++
++# define mutex_lock_nest_lock(lock, nest_lock) \
++do { \
++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
++} while (0)
++
++#else
++# define mutex_lock_nested(l, s) _mutex_lock(l)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible(l)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable(l)
++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
++#endif
++
++# define mutex_init(mutex) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), #mutex, &__key); \
++} while (0)
++
++# define __mutex_init(mutex, name, key) \
++do { \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), name, key); \
++} while (0)
++
++#endif
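Annotation: mutex users need no source changes; the header keeps every mutex_* signature and routes it to rt_mutex on RT. A sketch with invented names:

#include <linux/mutex.h>

static DEFINE_MUTEX(cfg_lock);
static int cfg_value;

/* Sketch: compiles identically against the stock and the RT mutex.h. */
static void cfg_set(int v)
{
        mutex_lock(&cfg_lock);
        cfg_value = v;
        mutex_unlock(&cfg_lock);
}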
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index e8d79d4ebcfe..2ae8fa187016 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2409,14 +2409,53 @@ void netdev_freemem(struct net_device *dev);
+ void synchronize_net(void);
+ int init_dummy_netdev(struct net_device *dev);
+
+-DECLARE_PER_CPU(int, xmit_recursion);
+ #define XMIT_RECURSION_LIMIT 10
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline int dev_recursion_level(void)
++{
++ return current->xmit_recursion;
++}
++
++static inline int xmit_rec_read(void)
++{
++ return current->xmit_recursion;
++}
++
++static inline void xmit_rec_inc(void)
++{
++ current->xmit_recursion++;
++}
++
++static inline void xmit_rec_dec(void)
++{
++ current->xmit_recursion--;
++}
++
++#else
++
++DECLARE_PER_CPU(int, xmit_recursion);
+
+ static inline int dev_recursion_level(void)
+ {
+ return this_cpu_read(xmit_recursion);
+ }
+
++static inline int xmit_rec_read(void)
++{
++ return __this_cpu_read(xmit_recursion);
++}
++
++static inline void xmit_rec_inc(void)
++{
++ __this_cpu_inc(xmit_recursion);
++}
++
++static inline void xmit_rec_dec(void)
++{
++ __this_cpu_dec(xmit_recursion);
++}
++#endif
++
+ struct net_device *dev_get_by_index(struct net *net, int ifindex);
+ struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+@@ -2794,6 +2833,7 @@ struct softnet_data {
+ unsigned int dropped;
+ struct sk_buff_head input_pkt_queue;
+ struct napi_struct backlog;
++ struct sk_buff_head tofree_queue;
+
+ };
+
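Annotation: on RT the xmit recursion depth moves from a per-CPU counter into task_struct (current->xmit_recursion), because the transmitting task can be preempted and migrated mid-transmit. A hedged sketch of how the accessors pair up; xmit_guarded is invented and the real check sits inside the core's __dev_queue_xmit() path:

#include <linux/errno.h>
#include <linux/netdevice.h>

/* Sketch only: the real guard also holds rcu_read_lock_bh() around the
 * transmit, which the !RT __this_cpu_* accessors rely on. */
static int xmit_guarded(struct sk_buff *skb)
{
        int rc;

        if (xmit_rec_read() > XMIT_RECURSION_LIMIT)
                return -ELOOP;

        xmit_rec_inc();
        rc = dev_queue_xmit(skb);
        xmit_rec_dec();
        return rc;
}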
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index 2ad1a2b289b5..b4d10155af54 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -4,6 +4,7 @@
+
+ #include <linux/netdevice.h>
+ #include <linux/static_key.h>
++#include <linux/locallock.h>
+ #include <uapi/linux/netfilter/x_tables.h>
+
+ /* Test a struct->invflags and a boolean for inequality */
+@@ -300,6 +301,8 @@ void xt_free_table_info(struct xt_table_info *info);
+ */
+ DECLARE_PER_CPU(seqcount_t, xt_recseq);
+
++DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
++
+ /* xt_tee_enabled - true if x_tables needs to handle reentrancy
+ *
+ * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
+@@ -320,6 +323,9 @@ static inline unsigned int xt_write_recseq_begin(void)
+ {
+ unsigned int addend;
+
++ /* RT protection */
++ local_lock(xt_write_lock);
++
+ /*
+ * Low order bit of sequence is set if we already
+ * called xt_write_recseq_begin().
+@@ -350,6 +356,7 @@ static inline void xt_write_recseq_end(unsigned int addend)
+ /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
+ smp_wmb();
+ __this_cpu_add(xt_recseq.sequence, addend);
++ local_unlock(xt_write_lock);
+ }
+
+ /*
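Annotation: xt_write_recseq_begin()/end() now nest a local lock around the per-CPU sequence counter so the write section can sleep on RT. A sketch of the pairing as iptables-style counter updates would use it; the function name is illustrative:

#include <linux/netfilter/x_tables.h>

/* Sketch: begin/end must stay balanced, and the addend returned by
 * begin is handed back to end. */
static void count_packet(void)
{
        unsigned int addend;

        local_bh_disable();
        addend = xt_write_recseq_begin();
        /* ... bump per-CPU byte/packet counters ... */
        xt_write_recseq_end(addend);
        local_bh_enable();
}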
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 810124b33327..d54ca43d571f 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -165,7 +165,11 @@ struct nfs_inode {
+
+ /* Readers: in-flight sillydelete RPC calls */
+ /* Writers: rmdir */
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct semaphore rmdir_sem;
++#else
+ struct rw_semaphore rmdir_sem;
++#endif
+
+ #if IS_ENABLED(CONFIG_NFS_V4)
+ struct nfs4_cached_acl *nfs4_acl;
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 7cc0deee5bde..a20f49ee69ee 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1484,7 +1484,7 @@ struct nfs_unlinkdata {
+ struct nfs_removeargs args;
+ struct nfs_removeres res;
+ struct dentry *dentry;
+- wait_queue_head_t wq;
++ struct swait_queue_head wq;
+ struct rpc_cred *cred;
+ struct nfs_fattr dir_attr;
+ long timeout;
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index 4149868de4e6..babe5b9bcb91 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
+@@ -6,7 +6,7 @@
+ *
+ * Alan Cox <Alan.Cox@linux.org>
+ */
+-
++
+ #ifndef _LINUX_NOTIFIER_H
+ #define _LINUX_NOTIFIER_H
+ #include <linux/errno.h>
+@@ -42,9 +42,7 @@
+ * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
+ * As compensation, srcu_notifier_chain_unregister() is rather expensive.
+ * SRCU notifier chains should be used when the chain will be called very
+- * often but notifier_blocks will seldom be removed. Also, SRCU notifier
+- * chains are slightly more difficult to use because they require special
+- * runtime initialization.
++ * often but notifier_blocks will seldom be removed.
+ */
+
+ struct notifier_block;
+@@ -90,7 +88,7 @@ struct srcu_notifier_head {
+ (name)->head = NULL; \
+ } while (0)
+
+-/* srcu_notifier_heads must be initialized and cleaned up dynamically */
++/* srcu_notifier_heads must be cleaned up dynamically */
+ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
+ #define srcu_cleanup_notifier_head(name) \
+ cleanup_srcu_struct(&(name)->srcu);
+@@ -103,7 +101,13 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
+ .head = NULL }
+ #define RAW_NOTIFIER_INIT(name) { \
+ .head = NULL }
+-/* srcu_notifier_heads cannot be initialized statically */
++
++#define SRCU_NOTIFIER_INIT(name, pcpu) \
++ { \
++ .mutex = __MUTEX_INITIALIZER(name.mutex), \
++ .head = NULL, \
++ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
++ }
+
+ #define ATOMIC_NOTIFIER_HEAD(name) \
+ struct atomic_notifier_head name = \
+@@ -115,6 +119,18 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
+ struct raw_notifier_head name = \
+ RAW_NOTIFIER_INIT(name)
+
++#define _SRCU_NOTIFIER_HEAD(name, mod) \
++ static DEFINE_PER_CPU(struct srcu_struct_array, \
++ name##_head_srcu_array); \
++ mod struct srcu_notifier_head name = \
++ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
++
++#define SRCU_NOTIFIER_HEAD(name) \
++ _SRCU_NOTIFIER_HEAD(name, )
++
++#define SRCU_NOTIFIER_HEAD_STATIC(name) \
++ _SRCU_NOTIFIER_HEAD(name, static)
++
+ #ifdef __KERNEL__
+
+ extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+@@ -184,12 +200,12 @@ static inline int notifier_to_errno(int ret)
+
+ /*
+ * Declared notifiers so far. I can imagine quite a few more chains
+- * over time (eg laptop power reset chains, reboot chain (to clean
++ * over time (eg laptop power reset chains, reboot chain (to clean
+ * device units up), device [un]mount chain, module load/unload chain,
+- * low memory chain, screenblank chain (for plug in modular screenblankers)
++ * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
+ */
+-
++
+ /* CPU notifiers are defined in include/linux/cpu.h. */
+
+ /* netdevice notifiers are defined in include/linux/netdevice.h */
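Annotation: SRCU_NOTIFIER_HEAD_STATIC() removes the old requirement to call srcu_init_notifier_head() at runtime. A sketch assuming an invented chain name:

#include <linux/notifier.h>

/* Sketch: statically initialized SRCU chain, no init call needed. */
SRCU_NOTIFIER_HEAD_STATIC(clock_chain);

static int clock_notify(unsigned long event, void *data)
{
        return srcu_notifier_call_chain(&clock_chain, event, data);
}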
+diff --git a/include/linux/percpu.h b/include/linux/percpu.h
+index 56939d3f6e53..1c7e33fc83e4 100644
+--- a/include/linux/percpu.h
++++ b/include/linux/percpu.h
+@@ -18,6 +18,35 @@
+ #define PERCPU_MODULE_RESERVE 0
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++#define get_local_var(var) (*({ \
++ migrate_disable(); \
++ this_cpu_ptr(&var); }))
++
++#define put_local_var(var) do { \
++ (void)&(var); \
++ migrate_enable(); \
++} while (0)
++
++# define get_local_ptr(var) ({ \
++ migrate_disable(); \
++ this_cpu_ptr(var); })
++
++# define put_local_ptr(var) do { \
++ (void)(var); \
++ migrate_enable(); \
++} while (0)
++
++#else
++
++#define get_local_var(var) get_cpu_var(var)
++#define put_local_var(var) put_cpu_var(var)
++#define get_local_ptr(var) get_cpu_ptr(var)
++#define put_local_ptr(var) put_cpu_ptr(var)
++
++#endif
++
+ /* minimum unit size, also is the maximum supported allocation size */
+ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
+
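Annotation: get_local_var()/put_local_var() are the RT-aware replacements for get_cpu_var()/put_cpu_var(); on RT they disable only migration, keeping the section preemptible. A sketch with an invented per-CPU variable:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, scratch);

/* Sketch: pinned to this CPU, but still preemptible on RT. */
static void scratch_bump(void)
{
        int *p = &get_local_var(scratch);

        (*p)++;
        put_local_var(scratch);
}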
+diff --git a/include/linux/pid.h b/include/linux/pid.h
+index 23705a53abba..2cc64b779f03 100644
+--- a/include/linux/pid.h
++++ b/include/linux/pid.h
+@@ -2,6 +2,7 @@
+ #define _LINUX_PID_H
+
+ #include <linux/rcupdate.h>
++#include <linux/atomic.h>
+
+ enum pid_type
+ {
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 75e4e30677f1..1cfb1cb72354 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -50,7 +50,11 @@
+ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+ #define NMI_OFFSET (1UL << NMI_SHIFT)
+
+-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++#else
++# define SOFTIRQ_DISABLE_OFFSET (0)
++#endif
+
+ /* We use the MSB mostly because its available */
+ #define PREEMPT_NEED_RESCHED 0x80000000
+@@ -59,9 +63,15 @@
+ #include <asm/preempt.h>
+
+ #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+ #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
++#else
++# define softirq_count() (0UL)
++extern int in_serving_softirq(void);
++#endif
+
+ /*
+ * Are we doing bottom half or hardware interrupt processing?
+@@ -72,7 +82,6 @@
+ #define in_irq() (hardirq_count())
+ #define in_softirq() (softirq_count())
+ #define in_interrupt() (irq_count())
+-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+
+ /*
+ * Are we in NMI context?
+@@ -91,7 +100,11 @@
+ /*
+ * The preempt_count offset after spin_lock()
+ */
++#if !defined(CONFIG_PREEMPT_RT_FULL)
+ #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
++#else
++#define PREEMPT_LOCK_OFFSET 0
++#endif
+
+ /*
+ * The preempt_count offset needed for things like:
+@@ -140,6 +153,20 @@ extern void preempt_count_sub(int val);
+ #define preempt_count_inc() preempt_count_add(1)
+ #define preempt_count_dec() preempt_count_sub(1)
+
++#ifdef CONFIG_PREEMPT_LAZY
++#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
++#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
++#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
++#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
++#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
++#else
++#define add_preempt_lazy_count(val) do { } while (0)
++#define sub_preempt_lazy_count(val) do { } while (0)
++#define inc_preempt_lazy_count() do { } while (0)
++#define dec_preempt_lazy_count() do { } while (0)
++#define preempt_lazy_count() (0)
++#endif
++
+ #ifdef CONFIG_PREEMPT_COUNT
+
+ #define preempt_disable() \
+@@ -148,13 +175,25 @@ do { \
+ barrier(); \
+ } while (0)
+
++#define preempt_lazy_disable() \
++do { \
++ inc_preempt_lazy_count(); \
++ barrier(); \
++} while (0)
++
+ #define sched_preempt_enable_no_resched() \
+ do { \
+ barrier(); \
+ preempt_count_dec(); \
+ } while (0)
+
+-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++# define preempt_check_resched_rt() preempt_check_resched()
++#else
++# define preempt_enable_no_resched() preempt_enable()
++# define preempt_check_resched_rt() barrier();
++#endif
+
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+
+@@ -179,6 +218,13 @@ do { \
+ __preempt_schedule(); \
+ } while (0)
+
++#define preempt_lazy_enable() \
++do { \
++ dec_preempt_lazy_count(); \
++ barrier(); \
++ preempt_check_resched(); \
++} while (0)
++
+ #else /* !CONFIG_PREEMPT */
+ #define preempt_enable() \
+ do { \
+@@ -224,6 +270,7 @@ do { \
+ #define preempt_disable_notrace() barrier()
+ #define preempt_enable_no_resched_notrace() barrier()
+ #define preempt_enable_notrace() barrier()
++#define preempt_check_resched_rt() barrier()
+ #define preemptible() 0
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+@@ -244,10 +291,31 @@ do { \
+ } while (0)
+ #define preempt_fold_need_resched() \
+ do { \
+- if (tif_need_resched()) \
++ if (tif_need_resched_now()) \
+ set_preempt_need_resched(); \
+ } while (0)
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define preempt_disable_rt() preempt_disable()
++# define preempt_enable_rt() preempt_enable()
++# define preempt_disable_nort() barrier()
++# define preempt_enable_nort() barrier()
++# ifdef CONFIG_SMP
++ extern void migrate_disable(void);
++ extern void migrate_enable(void);
++# else /* CONFIG_SMP */
++# define migrate_disable() barrier()
++# define migrate_enable() barrier()
++# endif /* CONFIG_SMP */
++#else
++# define preempt_disable_rt() barrier()
++# define preempt_enable_rt() barrier()
++# define preempt_disable_nort() preempt_disable()
++# define preempt_enable_nort() preempt_enable()
++# define migrate_disable() preempt_disable()
++# define migrate_enable() preempt_enable()
++#endif
++
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+
+ struct preempt_notifier;
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 696a56be7d3e..310aa321ef0c 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -125,9 +125,11 @@ struct va_format {
+ #ifdef CONFIG_EARLY_PRINTK
+ extern asmlinkage __printf(1, 2)
+ void early_printk(const char *fmt, ...);
++extern void printk_kill(void);
+ #else
+ static inline __printf(1, 2) __cold
+ void early_printk(const char *s, ...) { }
++static inline void printk_kill(void) { }
+ #endif
+
+ #ifdef CONFIG_PRINTK_NMI
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 52b97db93830..fd9ea1c68db6 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -289,9 +289,19 @@ unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
+ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
+ void ***results, unsigned long *indices,
+ unsigned long first_index, unsigned int max_items);
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline int radix_tree_preload(gfp_t gm) { return 0; }
++static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; }
++static inline int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
++{
++ return 0;
++};
++
++#else
+ int radix_tree_preload(gfp_t gfp_mask);
+ int radix_tree_maybe_preload(gfp_t gfp_mask);
+ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
++#endif
+ void radix_tree_init(void);
+ void *radix_tree_tag_set(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag);
+@@ -316,7 +326,7 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
+
+ static inline void radix_tree_preload_end(void)
+ {
+- preempt_enable();
++ preempt_enable_nort();
+ }
+
+ /**
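Annotation: on RT radix_tree_preload() becomes a no-op that reports success, and radix_tree_preload_end() correspondingly drops only the !RT preempt_disable(). The caller pattern is unchanged; a sketch with tree locking omitted for brevity and store_item as an invented helper:

#include <linux/gfp.h>
#include <linux/radix-tree.h>

/* Sketch: preload, insert, end -- identical on RT and !RT. */
static int store_item(struct radix_tree_root *root,
                      unsigned long index, void *item)
{
        int err = radix_tree_preload(GFP_KERNEL);

        if (err)
                return err;
        err = radix_tree_insert(root, index, item);
        radix_tree_preload_end();
        return err;
}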
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 3d6e9815cd85..f6e8860b6494 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -20,7 +20,7 @@ struct random_ready_callback {
+ extern void add_device_randomness(const void *, unsigned int);
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+-extern void add_interrupt_randomness(int irq, int irq_flags);
++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
+
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern int add_random_ready_callback(struct random_ready_callback *rdy);
+diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
+index e585018498d5..25c64474fc27 100644
+--- a/include/linux/rbtree.h
++++ b/include/linux/rbtree.h
+@@ -31,7 +31,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/stddef.h>
+-#include <linux/rcupdate.h>
++#include <linux/rcu_assign_pointer.h>
+
+ struct rb_node {
+ unsigned long __rb_parent_color;
+diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
+index d076183e49be..36bfb4dd57ae 100644
+--- a/include/linux/rbtree_augmented.h
++++ b/include/linux/rbtree_augmented.h
+@@ -26,6 +26,7 @@
+
+ #include <linux/compiler.h>
+ #include <linux/rbtree.h>
++#include <linux/rcupdate.h>
+
+ /*
+ * Please note - only struct rb_augment_callbacks and the prototypes for
+diff --git a/include/linux/rcu_assign_pointer.h b/include/linux/rcu_assign_pointer.h
+new file mode 100644
+index 000000000000..7066962a4379
+--- /dev/null
++++ b/include/linux/rcu_assign_pointer.h
+@@ -0,0 +1,54 @@
++#ifndef __LINUX_RCU_ASSIGN_POINTER_H__
++#define __LINUX_RCU_ASSIGN_POINTER_H__
++#include <linux/compiler.h>
++#include <asm/barrier.h>
++
++/**
++ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
++ * @v: The value to statically initialize with.
++ */
++#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
++
++/**
++ * rcu_assign_pointer() - assign to RCU-protected pointer
++ * @p: pointer to assign to
++ * @v: value to assign (publish)
++ *
++ * Assigns the specified value to the specified RCU-protected
++ * pointer, ensuring that any concurrent RCU readers will see
++ * any prior initialization.
++ *
++ * Inserts memory barriers on architectures that require them
++ * (which is most of them), and also prevents the compiler from
++ * reordering the code that initializes the structure after the pointer
++ * assignment. More importantly, this call documents which pointers
++ * will be dereferenced by RCU read-side code.
++ *
++ * In some special cases, you may use RCU_INIT_POINTER() instead
++ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
++ * to the fact that it does not constrain either the CPU or the compiler.
++ * That said, using RCU_INIT_POINTER() when you should have used
++ * rcu_assign_pointer() is a very bad thing that results in
++ * impossible-to-diagnose memory corruption. So please be careful.
++ * See the RCU_INIT_POINTER() comment header for details.
++ *
++ * Note that rcu_assign_pointer() evaluates each of its arguments only
++ * once, appearances notwithstanding. One of the "extra" evaluations
++ * is in typeof() and the other visible only to sparse (__CHECKER__),
++ * neither of which actually execute the argument. As with most cpp
++ * macros, this execute-arguments-only-once property is important, so
++ * please be careful when making changes to rcu_assign_pointer() and the
++ * other macros that it invokes.
++ */
++#define rcu_assign_pointer(p, v) \
++({ \
++ uintptr_t _r_a_p__v = (uintptr_t)(v); \
++ \
++ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
++ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
++ else \
++ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
++ _r_a_p__v; \
++})
++
++#endif
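Annotation: the macro itself is unchanged; it only moves into its own header so low-level headers (rbtree.h above) can use it without pulling in all of rcupdate.h. A standard publish sketch with invented types:

#include <linux/rcupdate.h>

struct cfg { int a, b; };
static struct cfg __rcu *active_cfg;

/* Sketch: initialize first, publish last; readers then see a fully
 * initialized object via rcu_dereference(). */
static void cfg_publish(struct cfg *newc)
{
        newc->a = 1;
        newc->b = 2;
        rcu_assign_pointer(active_cfg, newc);
}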
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 1aa62e1a761b..2a614acb433e 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -46,6 +46,7 @@
+ #include <linux/compiler.h>
+ #include <linux/ktime.h>
+ #include <linux/irqflags.h>
++#include <linux/rcu_assign_pointer.h>
+
+ #include <asm/barrier.h>
+
+@@ -178,6 +179,9 @@ void call_rcu(struct rcu_head *head,
+
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++#define call_rcu_bh call_rcu
++#else
+ /**
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+@@ -201,6 +205,7 @@ void call_rcu(struct rcu_head *head,
+ */
+ void call_rcu_bh(struct rcu_head *head,
+ rcu_callback_t func);
++#endif
+
+ /**
+ * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
+@@ -301,6 +306,11 @@ void synchronize_rcu(void);
+ * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+ */
+ #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
++#ifndef CONFIG_PREEMPT_RT_FULL
++#define sched_rcu_preempt_depth() rcu_preempt_depth()
++#else
++static inline int sched_rcu_preempt_depth(void) { return 0; }
++#endif
+
+ #else /* #ifdef CONFIG_PREEMPT_RCU */
+
+@@ -326,6 +336,8 @@ static inline int rcu_preempt_depth(void)
+ return 0;
+ }
+
++#define sched_rcu_preempt_depth() rcu_preempt_depth()
++
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+ /* Internal to kernel */
+@@ -500,7 +512,14 @@ extern struct lockdep_map rcu_callback_map;
+ int debug_lockdep_rcu_enabled(void);
+
+ int rcu_read_lock_held(void);
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline int rcu_read_lock_bh_held(void)
++{
++ return rcu_read_lock_held();
++}
++#else
+ int rcu_read_lock_bh_held(void);
++#endif
+
+ /**
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+@@ -621,54 +640,6 @@ static inline void rcu_preempt_sleep_check(void)
+ })
+
+ /**
+- * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
+- * @v: The value to statically initialize with.
+- */
+-#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
+-
+-/**
+- * rcu_assign_pointer() - assign to RCU-protected pointer
+- * @p: pointer to assign to
+- * @v: value to assign (publish)
+- *
+- * Assigns the specified value to the specified RCU-protected
+- * pointer, ensuring that any concurrent RCU readers will see
+- * any prior initialization.
+- *
+- * Inserts memory barriers on architectures that require them
+- * (which is most of them), and also prevents the compiler from
+- * reordering the code that initializes the structure after the pointer
+- * assignment. More importantly, this call documents which pointers
+- * will be dereferenced by RCU read-side code.
+- *
+- * In some special cases, you may use RCU_INIT_POINTER() instead
+- * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
+- * to the fact that it does not constrain either the CPU or the compiler.
+- * That said, using RCU_INIT_POINTER() when you should have used
+- * rcu_assign_pointer() is a very bad thing that results in
+- * impossible-to-diagnose memory corruption. So please be careful.
+- * See the RCU_INIT_POINTER() comment header for details.
+- *
+- * Note that rcu_assign_pointer() evaluates each of its arguments only
+- * once, appearances notwithstanding. One of the "extra" evaluations
+- * is in typeof() and the other visible only to sparse (__CHECKER__),
+- * neither of which actually execute the argument. As with most cpp
+- * macros, this execute-arguments-only-once property is important, so
+- * please be careful when making changes to rcu_assign_pointer() and the
+- * other macros that it invokes.
+- */
+-#define rcu_assign_pointer(p, v) \
+-({ \
+- uintptr_t _r_a_p__v = (uintptr_t)(v); \
+- \
+- if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
+- WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
+- else \
+- smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
+- _r_a_p__v; \
+-})
+-
+-/**
+ * rcu_access_pointer() - fetch RCU pointer with no dereferencing
+ * @p: The pointer to read
+ *
+@@ -946,10 +917,14 @@ static inline void rcu_read_unlock(void)
+ static inline void rcu_read_lock_bh(void)
+ {
+ local_bh_disable();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rcu_read_lock();
++#else
+ __acquire(RCU_BH);
+ rcu_lock_acquire(&rcu_bh_lock_map);
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_bh() used illegally while idle");
++#endif
+ }
+
+ /*
+@@ -959,10 +934,14 @@ static inline void rcu_read_lock_bh(void)
+ */
+ static inline void rcu_read_unlock_bh(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rcu_read_unlock();
++#else
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_bh() used illegally while idle");
+ rcu_lock_release(&rcu_bh_lock_map);
+ __release(RCU_BH);
++#endif
+ local_bh_enable();
+ }
+
+diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
+index 63a4e4cf40a5..08ab12df2863 100644
+--- a/include/linux/rcutree.h
++++ b/include/linux/rcutree.h
+@@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu)
+ rcu_note_context_switch();
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define synchronize_rcu_bh synchronize_rcu
++#else
+ void synchronize_rcu_bh(void);
++#endif
+ void synchronize_sched_expedited(void);
+ void synchronize_rcu_expedited(void);
+
+@@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void)
+ }
+
+ void rcu_barrier(void);
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define rcu_barrier_bh rcu_barrier
++#else
+ void rcu_barrier_bh(void);
++#endif
+ void rcu_barrier_sched(void);
+ unsigned long get_state_synchronize_rcu(void);
+ void cond_synchronize_rcu(unsigned long oldstate);
+@@ -82,17 +90,14 @@ void cond_synchronize_sched(unsigned long oldstate);
+ extern unsigned long rcutorture_testseq;
+ extern unsigned long rcutorture_vernum;
+ unsigned long rcu_batches_started(void);
+-unsigned long rcu_batches_started_bh(void);
+ unsigned long rcu_batches_started_sched(void);
+ unsigned long rcu_batches_completed(void);
+-unsigned long rcu_batches_completed_bh(void);
+ unsigned long rcu_batches_completed_sched(void);
+ unsigned long rcu_exp_batches_completed(void);
+ unsigned long rcu_exp_batches_completed_sched(void);
+ void show_rcu_gp_kthreads(void);
+
+ void rcu_force_quiescent_state(void);
+-void rcu_bh_force_quiescent_state(void);
+ void rcu_sched_force_quiescent_state(void);
+
+ void rcu_idle_enter(void);
+@@ -109,6 +114,16 @@ extern int rcu_scheduler_active __read_mostly;
+
+ bool rcu_is_watching(void);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++void rcu_bh_force_quiescent_state(void);
++unsigned long rcu_batches_started_bh(void);
++unsigned long rcu_batches_completed_bh(void);
++#else
++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
++# define rcu_batches_completed_bh rcu_batches_completed
++# define rcu_batches_started_bh rcu_batches_completed
++#endif
++
+ void rcu_all_qs(void);
+
+ /* RCUtree hotplug events */
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 1abba5ce2a2f..30211c627511 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -13,11 +13,15 @@
+ #define __LINUX_RT_MUTEX_H
+
+ #include <linux/linkage.h>
++#include <linux/spinlock_types_raw.h>
+ #include <linux/rbtree.h>
+-#include <linux/spinlock_types.h>
+
+ extern int max_lock_depth; /* for sysctl */
+
++#ifdef CONFIG_DEBUG_MUTEXES
++#include <linux/debug_locks.h>
++#endif
++
+ /**
+ * The rt_mutex structure
+ *
+@@ -31,8 +35,8 @@ struct rt_mutex {
+ struct rb_root waiters;
+ struct rb_node *waiters_leftmost;
+ struct task_struct *owner;
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+ int save_state;
++#ifdef CONFIG_DEBUG_RT_MUTEXES
+ const char *name, *file;
+ int line;
+ void *magic;
+@@ -55,22 +59,33 @@ struct hrtimer_sleeper;
+ # define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+ #endif
+
++# define rt_mutex_init(mutex) \
++ do { \
++ raw_spin_lock_init(&(mutex)->wait_lock); \
++ __rt_mutex_init(mutex, #mutex); \
++ } while (0)
++
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+ , .name = #mutexname, .file = __FILE__, .line = __LINE__
+-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
+ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+ #else
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
+ # define rt_mutex_debug_task_free(t) do { } while (0)
+ #endif
+
+-#define __RT_MUTEX_INITIALIZER(mutexname) \
+- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .waiters = RB_ROOT \
+ , .owner = NULL \
+- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
++ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
++
++#define __RT_MUTEX_INITIALIZER(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
++
++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ , .save_state = 1 }
+
+ #define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+@@ -91,6 +106,7 @@ extern void rt_mutex_destroy(struct rt_mutex *lock);
+
+ extern void rt_mutex_lock(struct rt_mutex *lock);
+ extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
++extern int rt_mutex_lock_killable(struct rt_mutex *lock);
+ extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout);
+
+diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
+new file mode 100644
+index 000000000000..49ed2d45d3be
+--- /dev/null
++++ b/include/linux/rwlock_rt.h
+@@ -0,0 +1,99 @@
++#ifndef __LINUX_RWLOCK_RT_H
++#define __LINUX_RWLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++#define rwlock_init(rwl) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(rwl)->lock); \
++ __rt_rwlock_init(rwl, #rwl, &__key); \
++} while (0)
++
++extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
++
++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
++
++#define write_trylock_irqsave(lock, flags) \
++ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
++
++#define read_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = rt_read_lock_irqsave(lock); \
++ } while (0)
++
++#define write_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = rt_write_lock_irqsave(lock); \
++ } while (0)
++
++#define read_lock(lock) rt_read_lock(lock)
++
++#define read_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ rt_read_lock(lock); \
++ } while (0)
++
++#define read_lock_irq(lock) read_lock(lock)
++
++#define write_lock(lock) rt_write_lock(lock)
++
++#define write_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ rt_write_lock(lock); \
++ } while (0)
++
++#define write_lock_irq(lock) write_lock(lock)
++
++#define read_unlock(lock) rt_read_unlock(lock)
++
++#define read_unlock_bh(lock) \
++ do { \
++ rt_read_unlock(lock); \
++ local_bh_enable(); \
++ } while (0)
++
++#define read_unlock_irq(lock) read_unlock(lock)
++
++#define write_unlock(lock) rt_write_unlock(lock)
++
++#define write_unlock_bh(lock) \
++ do { \
++ rt_write_unlock(lock); \
++ local_bh_enable(); \
++ } while (0)
++
++#define write_unlock_irq(lock) write_unlock(lock)
++
++#define read_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ rt_read_unlock(lock); \
++ } while (0)
++
++#define write_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ rt_write_unlock(lock); \
++ } while (0)
++
++#endif
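Annotation: rwlock callers likewise keep the stock API; on RT the lock is an rt_mutex that allows only recursive read-locking by a single owner (see rwlock_types_rt.h further down). A sketch with invented names:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(tbl_lock);
static int tbl[16];

/* Sketch: same source against both rwlock implementations. */
static int tbl_read(int i)
{
        int v;

        read_lock(&tbl_lock);
        v = tbl[i];
        read_unlock(&tbl_lock);
        return v;
}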
+diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
+index cc0072e93e36..5317cd957292 100644
+--- a/include/linux/rwlock_types.h
++++ b/include/linux/rwlock_types.h
+@@ -1,6 +1,10 @@
+ #ifndef __LINUX_RWLOCK_TYPES_H
+ #define __LINUX_RWLOCK_TYPES_H
+
++#if !defined(__LINUX_SPINLOCK_TYPES_H)
++# error "Do not include directly, include spinlock_types.h"
++#endif
++
+ /*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
+new file mode 100644
+index 000000000000..51b28d775fe1
+--- /dev/null
++++ b/include/linux/rwlock_types_rt.h
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_RWLOCK_TYPES_RT_H
++#define __LINUX_RWLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * rwlocks - rtmutex which allows single reader recursion
++ */
++typedef struct {
++ struct rt_mutex lock;
++ int read_depth;
++ unsigned int break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} rwlock_t;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define RW_DEP_MAP_INIT(lockname)
++#endif
++
++#define __RW_LOCK_UNLOCKED(name) \
++ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
++ RW_DEP_MAP_INIT(name) }
++
++#define DEFINE_RWLOCK(name) \
++ rwlock_t name = __RW_LOCK_UNLOCKED(name)
++
++#endif
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index dd1d14250340..8e1f44ff1f2f 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -19,6 +19,10 @@
+ #include <linux/osq_lock.h>
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++#include <linux/rwsem_rt.h>
++#else /* PREEMPT_RT_FULL */
++
+ struct rw_semaphore;
+
+ #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+@@ -184,4 +188,6 @@ extern void up_read_non_owner(struct rw_semaphore *sem);
+ # define up_read_non_owner(sem) up_read(sem)
+ #endif
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* _LINUX_RWSEM_H */
+diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
+new file mode 100644
+index 000000000000..e26bd95a57c3
+--- /dev/null
++++ b/include/linux/rwsem_rt.h
+@@ -0,0 +1,167 @@
++#ifndef _LINUX_RWSEM_RT_H
++#define _LINUX_RWSEM_RT_H
++
++#ifndef _LINUX_RWSEM_H
++#error "Include rwsem.h"
++#endif
++
++/*
++ * RW-semaphores are a spinlock plus a reader-depth count.
++ *
++ * Note that the semantics are different from the usual
++ * Linux rw-sems: in PREEMPT_RT mode we do not allow
++ * multiple readers to hold the lock at once, we only allow
++ * a read-lock owner to read-lock recursively. This is
++ * better for latency, makes the implementation inherently
++ * fair and makes it simpler as well.
++ */
++
++#include <linux/rtmutex.h>
++
++struct rw_semaphore {
++ struct rt_mutex lock;
++ int read_depth;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __RWSEM_INITIALIZER(name) \
++ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
++ RW_DEP_MAP_INIT(name) }
++
++#define DECLARE_RWSEM(lockname) \
++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
++
++extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
++ struct lock_class_key *key);
++
++#define __rt_init_rwsem(sem, name, key) \
++ do { \
++ rt_mutex_init(&(sem)->lock); \
++ __rt_rwsem_init((sem), (name), (key));\
++ } while (0)
++
++#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
++
++# define rt_init_rwsem(sem) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ __rt_init_rwsem((sem), #sem, &__key); \
++} while (0)
++
++extern void rt_down_write(struct rw_semaphore *rwsem);
++extern int rt_down_write_killable(struct rw_semaphore *rwsem);
++extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
++extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
++extern int rt_down_write_killable_nested(struct rw_semaphore *rwsem,
++ int subclass);
++extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
++ struct lockdep_map *nest);
++extern void rt__down_read(struct rw_semaphore *rwsem);
++extern void rt_down_read(struct rw_semaphore *rwsem);
++extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
++extern int rt__down_read_trylock(struct rw_semaphore *rwsem);
++extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
++extern void __rt_up_read(struct rw_semaphore *rwsem);
++extern void rt_up_read(struct rw_semaphore *rwsem);
++extern void rt_up_write(struct rw_semaphore *rwsem);
++extern void rt_downgrade_write(struct rw_semaphore *rwsem);
++
++#define init_rwsem(sem) rt_init_rwsem(sem)
++#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
++
++static inline int rwsem_is_contended(struct rw_semaphore *sem)
++{
++ /* rt_mutex_has_waiters() */
++ return !RB_EMPTY_ROOT(&sem->lock.waiters);
++}
++
++static inline void __down_read(struct rw_semaphore *sem)
++{
++ rt__down_read(sem);
++}
++
++static inline void down_read(struct rw_semaphore *sem)
++{
++ rt_down_read(sem);
++}
++
++static inline int __down_read_trylock(struct rw_semaphore *sem)
++{
++ return rt__down_read_trylock(sem);
++}
++
++static inline int down_read_trylock(struct rw_semaphore *sem)
++{
++ return rt_down_read_trylock(sem);
++}
++
++static inline void down_write(struct rw_semaphore *sem)
++{
++ rt_down_write(sem);
++}
++
++static inline int down_write_killable(struct rw_semaphore *sem)
++{
++ return rt_down_write_killable(sem);
++}
++
++static inline int down_write_trylock(struct rw_semaphore *sem)
++{
++ return rt_down_write_trylock(sem);
++}
++
++static inline void __up_read(struct rw_semaphore *sem)
++{
++ __rt_up_read(sem);
++}
++
++static inline void up_read(struct rw_semaphore *sem)
++{
++ rt_up_read(sem);
++}
++
++static inline void up_write(struct rw_semaphore *sem)
++{
++ rt_up_write(sem);
++}
++
++static inline void downgrade_write(struct rw_semaphore *sem)
++{
++ rt_downgrade_write(sem);
++}
++
++static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
++{
++ return rt_down_read_nested(sem, subclass);
++}
++
++static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
++{
++ rt_down_write_nested(sem, subclass);
++}
++
++static inline int down_write_killable_nested(struct rw_semaphore *sem,
++ int subclass)
++{
++ return rt_down_write_killable_nested(sem, subclass);
++}
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++static inline void down_write_nest_lock(struct rw_semaphore *sem,
++ struct rw_semaphore *nest_lock)
++{
++ rt_down_write_nested_lock(sem, &nest_lock->dep_map);
++}
++
++#else
++
++static inline void down_write_nest_lock(struct rw_semaphore *sem,
++ struct rw_semaphore *nest_lock)
++{
++ rt_down_write_nested_lock(sem, NULL);
++}
++#endif
++#endif
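/*
 * Illustrative sketch (not part of the patch): per the comment above, the
 * RT rw_semaphore admits only one reader at a time, so even the read side
 * can block on another reader. A hypothetical user, written as usual:
 */
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_rwsem);	/* hypothetical semaphore */

static int demo_try_read(void)
{
	if (!down_read_trylock(&demo_rwsem))	/* fails if *any* other task holds it */
		return -EBUSY;
	/* ... read-side work; on RT a second reader would have blocked ... */
	up_read(&demo_rwsem);
	return 0;
}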
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 62c68e513e39..c873ce0183ab 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -26,6 +26,7 @@ struct sched_param {
+ #include <linux/nodemask.h>
+ #include <linux/mm_types.h>
+ #include <linux/preempt.h>
++#include <asm/kmap_types.h>
+
+ #include <asm/page.h>
+ #include <asm/ptrace.h>
+@@ -243,10 +244,7 @@ extern char ___assert_task_state[1 - 2*!!(
+ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+ __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+
+-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+ #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+-#define task_is_stopped_or_traced(task) \
+- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+ #define task_contributes_to_load(task) \
+ ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
+ (task->flags & PF_FROZEN) == 0 && \
+@@ -312,6 +310,11 @@ extern char ___assert_task_state[1 - 2*!!(
+
+ #endif
+
++#define __set_current_state_no_track(state_value) \
++ do { current->state = (state_value); } while (0)
++#define set_current_state_no_track(state_value) \
++ set_mb(current->state, (state_value))
++
+ /* Task command name length */
+ #define TASK_COMM_LEN 16
+
+@@ -1009,8 +1012,18 @@ struct wake_q_head {
+ struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+
+ extern void wake_q_add(struct wake_q_head *head,
+- struct task_struct *task);
+-extern void wake_up_q(struct wake_q_head *head);
++ struct task_struct *task);
++extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
++
++static inline void wake_up_q(struct wake_q_head *head)
++{
++ __wake_up_q(head, false);
++}
++
++static inline void wake_up_q_sleeper(struct wake_q_head *head)
++{
++ __wake_up_q(head, true);
++}
+
+ /*
+ * sched-domains (multiprocessor balancing) declarations:
+@@ -1459,6 +1472,7 @@ struct tlbflush_unmap_batch {
+
+ struct task_struct {
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
++ volatile long saved_state; /* saved state for "spinlock sleepers" */
+ void *stack;
+ atomic_t usage;
+ unsigned int flags; /* per process flags, defined below */
+@@ -1495,6 +1509,12 @@ struct task_struct {
+ #endif
+
+ unsigned int policy;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int migrate_disable;
++# ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable_atomic;
++# endif
++#endif
+ int nr_cpus_allowed;
+ cpumask_t cpus_allowed;
+
+@@ -1629,6 +1649,9 @@ struct task_struct {
+
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct task_struct *posix_timer_list;
++#endif
+
+ /* process credentials */
+ const struct cred __rcu *real_cred; /* objective and real subjective task
+@@ -1659,10 +1682,15 @@ struct task_struct {
+ /* signal handlers */
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
++ struct sigqueue *sigqueue_cache;
+
+ sigset_t blocked, real_blocked;
+ sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
+ struct sigpending pending;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /* TODO: move me into ->restart_block ? */
++ struct siginfo forced_info;
++#endif
+
+ unsigned long sas_ss_sp;
+ size_t sas_ss_size;
+@@ -1891,6 +1919,12 @@ struct task_struct {
+ /* bitmask and counter of trace recursion */
+ unsigned long trace_recursion;
+ #endif /* CONFIG_TRACING */
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ u64 preempt_timestamp_hist;
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ long timer_offset;
++#endif
++#endif
+ #ifdef CONFIG_KCOV
+ /* Coverage collection mode enabled for this task (0 if disabled). */
+ enum kcov_mode kcov_mode;
+@@ -1916,9 +1950,23 @@ struct task_struct {
+ unsigned int sequential_io;
+ unsigned int sequential_io_avg;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head put_rcu;
++ int softirq_nestcnt;
++ unsigned int softirqs_raised;
++#endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
++ int kmap_idx;
++ pte_t kmap_pte[KM_TYPE_NR];
++# endif
++#endif
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ unsigned long task_state_change;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int xmit_recursion;
++#endif
+ int pagefault_disabled;
+ #ifdef CONFIG_MMU
+ struct task_struct *oom_reaper_list;
+@@ -1939,14 +1987,6 @@ extern int arch_task_struct_size __read_mostly;
+ # define arch_task_struct_size (sizeof(struct task_struct))
+ #endif
+
+-/* Future-safe accessor for struct task_struct's cpus_allowed. */
+-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+-
+-static inline int tsk_nr_cpus_allowed(struct task_struct *p)
+-{
+- return p->nr_cpus_allowed;
+-}
+-
+ #define TNF_MIGRATED 0x01
+ #define TNF_NO_GROUP 0x02
+ #define TNF_SHARED 0x04
+@@ -2162,6 +2202,15 @@ extern struct pid *cad_pid;
+ extern void free_task(struct task_struct *tsk);
+ #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __put_task_struct_cb(struct rcu_head *rhp);
++
++static inline void put_task_struct(struct task_struct *t)
++{
++ if (atomic_dec_and_test(&t->usage))
++ call_rcu(&t->put_rcu, __put_task_struct_cb);
++}
++#else
+ extern void __put_task_struct(struct task_struct *t);
+
+ static inline void put_task_struct(struct task_struct *t)
+@@ -2169,6 +2218,7 @@ static inline void put_task_struct(struct task_struct *t)
+ if (atomic_dec_and_test(&t->usage))
+ __put_task_struct(t);
+ }
++#endif
+
+ struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+ struct task_struct *try_get_task_struct(struct task_struct **ptask);
+@@ -2210,6 +2260,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
+ /*
+ * Per process flags
+ */
++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
+ #define PF_EXITING 0x00000004 /* getting shut down */
+ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
+ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+@@ -2378,6 +2429,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
+
+ extern int set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask);
++int migrate_me(void);
++void tell_sched_cpu_down_begin(int cpu);
++void tell_sched_cpu_down_done(int cpu);
++
+ #else
+ static inline void do_set_cpus_allowed(struct task_struct *p,
+ const struct cpumask *new_mask)
+@@ -2390,6 +2445,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
+ return -EINVAL;
+ return 0;
+ }
++static inline int migrate_me(void) { return 0; }
++static inline void tell_sched_cpu_down_begin(int cpu) { }
++static inline void tell_sched_cpu_down_done(int cpu) { }
+ #endif
+
+ #ifdef CONFIG_NO_HZ_COMMON
+@@ -2624,6 +2682,7 @@ extern void xtime_update(unsigned long ticks);
+
+ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+ extern int wake_up_process(struct task_struct *tsk);
++extern int wake_up_lock_sleeper(struct task_struct * tsk);
+ extern void wake_up_new_task(struct task_struct *tsk);
+ #ifdef CONFIG_SMP
+ extern void kick_process(struct task_struct *tsk);
+@@ -2832,6 +2891,17 @@ static inline void mmdrop(struct mm_struct *mm)
+ __mmdrop(mm);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __mmdrop_delayed(struct rcu_head *rhp);
++static inline void mmdrop_delayed(struct mm_struct *mm)
++{
++ if (atomic_dec_and_test(&mm->mm_count))
++ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
++}
++#else
++# define mmdrop_delayed(mm) mmdrop(mm)
++#endif
++
+ static inline bool mmget_not_zero(struct mm_struct *mm)
+ {
+ return atomic_inc_not_zero(&mm->mm_users);
+@@ -3168,6 +3238,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
++}
++
++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
++}
++
++static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
++}
++
++static inline int need_resched_lazy(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++}
++
++static inline int need_resched_now(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED);
++}
++
++#else
++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
++static inline int need_resched_lazy(void) { return 0; }
++
++static inline int need_resched_now(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED);
++}
++
++#endif
++
+ static inline int restart_syscall(void)
+ {
+ set_tsk_thread_flag(current, TIF_SIGPENDING);
+@@ -3199,6 +3306,51 @@ static inline int signal_pending_state(long state, struct task_struct *p)
+ return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
+ }
+
++static inline bool __task_is_stopped_or_traced(struct task_struct *task)
++{
++ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
++ return true;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
++ return true;
++#endif
++ return false;
++}
++
++static inline bool task_is_stopped_or_traced(struct task_struct *task)
++{
++ bool traced_stopped;
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&task->pi_lock, flags);
++ traced_stopped = __task_is_stopped_or_traced(task);
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++#else
++ traced_stopped = __task_is_stopped_or_traced(task);
++#endif
++ return traced_stopped;
++}
++
++static inline bool task_is_traced(struct task_struct *task)
++{
++ bool traced = false;
++
++ if (task->state & __TASK_TRACED)
++ return true;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /* in case the task is sleeping on tasklist_lock */
++ raw_spin_lock_irq(&task->pi_lock);
++ if (task->state & __TASK_TRACED)
++ traced = true;
++ else if (task->saved_state & __TASK_TRACED)
++ traced = true;
++ raw_spin_unlock_irq(&task->pi_lock);
++#endif
++ return traced;
++}
++
+ /*
+ * cond_resched() and cond_resched_lock(): latency reduction via
+ * explicit rescheduling in places that are safe. The return
+@@ -3220,12 +3372,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
+ __cond_resched_lock(lock); \
+ })
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern int __cond_resched_softirq(void);
+
+ #define cond_resched_softirq() ({ \
+ ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
+ })
++#else
++# define cond_resched_softirq() cond_resched()
++#endif
+
+ static inline void cond_resched_rcu(void)
+ {
+@@ -3387,6 +3543,31 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+
+ #endif /* CONFIG_SMP */
+
++static inline int __migrate_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ return p->migrate_disable;
++#else
++ return 0;
++#endif
++}
++
++/* Future-safe accessor for struct task_struct's cpus_allowed. */
++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
++{
++ if (__migrate_disabled(p))
++ return cpumask_of(task_cpu(p));
++
++ return &p->cpus_allowed;
++}
++
++static inline int tsk_nr_cpus_allowed(struct task_struct *p)
++{
++ if (__migrate_disabled(p))
++ return 1;
++ return p->nr_cpus_allowed;
++}
++
+ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
+
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index ead97654c4e9..3d7223ffdd3b 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -220,20 +220,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+ return __read_seqcount_retry(s, start);
+ }
+
+-
+-
+-static inline void raw_write_seqcount_begin(seqcount_t *s)
++static inline void __raw_write_seqcount_begin(seqcount_t *s)
+ {
+ s->sequence++;
+ smp_wmb();
+ }
+
+-static inline void raw_write_seqcount_end(seqcount_t *s)
++static inline void raw_write_seqcount_begin(seqcount_t *s)
++{
++ preempt_disable_rt();
++ __raw_write_seqcount_begin(s);
++}
++
++static inline void __raw_write_seqcount_end(seqcount_t *s)
+ {
+ smp_wmb();
+ s->sequence++;
+ }
+
++static inline void raw_write_seqcount_end(seqcount_t *s)
++{
++ __raw_write_seqcount_end(s);
++ preempt_enable_rt();
++}
++
+ /**
+ * raw_write_seqcount_barrier - do a seq write barrier
+ * @s: pointer to seqcount_t
+@@ -428,10 +438,32 @@ typedef struct {
+ /*
+ * Read side functions for starting and finalizing a read side section.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline unsigned read_seqbegin(const seqlock_t *sl)
+ {
+ return read_seqcount_begin(&sl->seqcount);
+ }
++#else
++/*
++ * Starvation safe read side for RT
++ */
++static inline unsigned read_seqbegin(seqlock_t *sl)
++{
++ unsigned ret;
++
++repeat:
++ ret = ACCESS_ONCE(sl->seqcount.sequence);
++ if (unlikely(ret & 1)) {
++ /*
++		 * Take the lock and let the writer proceed (i.e. possibly
++		 * boost it), otherwise we could loop here forever.
++ */
++ spin_unlock_wait(&sl->lock);
++ goto repeat;
++ }
++ return ret;
++}
++#endif
+
+ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ {
+@@ -446,36 +478,45 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ static inline void write_seqlock(seqlock_t *sl)
+ {
+ spin_lock(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __raw_write_seqcount_begin(&sl->seqcount);
++}
++
++static inline int try_write_seqlock(seqlock_t *sl)
++{
++ if (spin_trylock(&sl->lock)) {
++ __raw_write_seqcount_begin(&sl->seqcount);
++ return 1;
++ }
++ return 0;
+ }
+
+ static inline void write_sequnlock(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __raw_write_seqcount_end(&sl->seqcount);
+ spin_unlock(&sl->lock);
+ }
+
+ static inline void write_seqlock_bh(seqlock_t *sl)
+ {
+ spin_lock_bh(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __raw_write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock_bh(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __raw_write_seqcount_end(&sl->seqcount);
+ spin_unlock_bh(&sl->lock);
+ }
+
+ static inline void write_seqlock_irq(seqlock_t *sl)
+ {
+ spin_lock_irq(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __raw_write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock_irq(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __raw_write_seqcount_end(&sl->seqcount);
+ spin_unlock_irq(&sl->lock);
+ }
+
+@@ -484,7 +525,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+- write_seqcount_begin(&sl->seqcount);
++ __raw_write_seqcount_begin(&sl->seqcount);
+ return flags;
+ }
+
+@@ -494,7 +535,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ static inline void
+ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __raw_write_seqcount_end(&sl->seqcount);
+ spin_unlock_irqrestore(&sl->lock, flags);
+ }
+
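/*
 * Illustrative sketch (not part of the patch): readers are unchanged by the
 * RT variant above -- read_seqbegin() now briefly blocks on sl->lock
 * (thereby boosting the writer) instead of spinning on an odd sequence
 * count. The canonical retry loop:
 */
static seqlock_t demo_seq = __SEQLOCK_UNLOCKED(demo_seq);	/* hypothetical */
static u64 demo_a, demo_b;

static u64 demo_read_pair(void)
{
	unsigned int seq;
	u64 a, b;

	do {
		seq = read_seqbegin(&demo_seq);
		a = demo_a;
		b = demo_b;
	} while (read_seqretry(&demo_seq, seq));

	return a + b;
}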
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index b63f63eaa39c..295540fdfc72 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -233,6 +233,7 @@ static inline void init_sigpending(struct sigpending *sig)
+ }
+
+ extern void flush_sigqueue(struct sigpending *queue);
++extern void flush_task_sigqueue(struct task_struct *tsk);
+
+ /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
+ static inline int valid_signal(unsigned long sig)
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 0f665cb26b50..59c38d1635c8 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -284,6 +284,7 @@ struct sk_buff_head {
+
+ __u32 qlen;
+ spinlock_t lock;
++ raw_spinlock_t raw_lock;
+ };
+
+ struct sk_buff;
+@@ -1565,6 +1566,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
+ __skb_queue_head_init(list);
+ }
+
++static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
++{
++ raw_spin_lock_init(&list->raw_lock);
++ __skb_queue_head_init(list);
++}
++
+ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
+ struct lock_class_key *class)
+ {
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index eccae4690f41..64ec52d951c3 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -185,6 +185,9 @@ static inline void smp_init(void) { }
+ #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+ #define put_cpu() preempt_enable()
+
++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
++#define put_cpu_light() migrate_enable()
++
+ /*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
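/*
 * Illustrative sketch (not part of the patch): get_cpu_light() pins the task
 * to its current CPU via migrate_disable() but, unlike get_cpu(), leaves it
 * preemptible, so sleeping locks may be taken inside the section on RT:
 */
static void demo_per_cpu_section(void)	/* hypothetical helper */
{
	int cpu = get_cpu_light();	/* no preempt_disable() */

	/* ... use per-cpu data of 'cpu'; a spinlock_t may be taken here ... */
	put_cpu_light();
}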
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 47dd0cebd204..02928fa5499d 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -271,7 +271,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+ #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+ /* Include rwlock functions */
+-#include <linux/rwlock.h>
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_rt.h>
++#else
++# include <linux/rwlock.h>
++#endif
+
+ /*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+@@ -282,6 +286,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+ # include <linux/spinlock_api_up.h>
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_rt.h>
++#else /* PREEMPT_RT_FULL */
++
+ /*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+@@ -347,6 +355,12 @@ static __always_inline void spin_unlock(spinlock_t *lock)
+ raw_spin_unlock(&lock->rlock);
+ }
+
++static __always_inline int spin_unlock_no_deboost(spinlock_t *lock)
++{
++ raw_spin_unlock(&lock->rlock);
++ return 0;
++}
++
+ static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ {
+ raw_spin_unlock_bh(&lock->rlock);
+@@ -416,4 +430,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+ #define atomic_dec_and_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* __LINUX_SPINLOCK_H */
+diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
+index 5344268e6e62..043263f30e81 100644
+--- a/include/linux/spinlock_api_smp.h
++++ b/include/linux/spinlock_api_smp.h
+@@ -189,6 +189,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ return 0;
+ }
+
+-#include <linux/rwlock_api_smp.h>
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_api_smp.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
+new file mode 100644
+index 000000000000..7eb87584e843
+--- /dev/null
++++ b/include/linux/spinlock_rt.h
+@@ -0,0 +1,165 @@
++#ifndef __LINUX_SPINLOCK_RT_H
++#define __LINUX_SPINLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++#include <linux/bug.h>
++
++extern void
++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
++
++#define spin_lock_init(slock) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(slock)->lock); \
++ __rt_spin_lock_init(slock, #slock, &__key); \
++} while (0)
++
++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
++
++extern void __lockfunc rt_spin_lock(spinlock_t *lock);
++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
++extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
++extern int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock);
++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
++
++/*
++ * lockdep-less calls, for derived types like rwlock
++ * (for trylock they can use rt_mutex_trylock() directly).
++ */
++extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
++extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
++
++#define spin_lock(lock) rt_spin_lock(lock)
++
++#define spin_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ rt_spin_lock(lock); \
++ } while (0)
++
++#define spin_lock_irq(lock) spin_lock(lock)
++
++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
++
++#define spin_trylock(lock) \
++({ \
++ int __locked; \
++ __locked = spin_do_trylock(lock); \
++ __locked; \
++})
++
++#ifdef CONFIG_LOCKDEP
++# define spin_lock_nested(lock, subclass) \
++ do { \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++
++#define spin_lock_bh_nested(lock, subclass) \
++ do { \
++ local_bh_disable(); \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++#else
++# define spin_lock_nested(lock, subclass) spin_lock(lock)
++# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ spin_lock(lock); \
++ } while (0)
++#endif
++
++#define spin_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ spin_lock(lock); \
++ } while (0)
++
++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
++{
++ unsigned long flags = 0;
++#ifdef CONFIG_TRACE_IRQFLAGS
++ flags = rt_spin_lock_trace_flags(lock);
++#else
++ spin_lock(lock); /* lock_local */
++#endif
++ return flags;
++}
++
++/* FIXME: we need rt_spin_lock_nest_lock */
++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
++
++#define spin_unlock(lock) rt_spin_unlock(lock)
++#define spin_unlock_no_deboost(lock) rt_spin_unlock_no_deboost(lock)
++
++#define spin_unlock_bh(lock) \
++ do { \
++ rt_spin_unlock(lock); \
++ local_bh_enable(); \
++ } while (0)
++
++#define spin_unlock_irq(lock) spin_unlock(lock)
++
++#define spin_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ spin_unlock(lock); \
++ } while (0)
++
++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
++#define spin_trylock_irq(lock) spin_trylock(lock)
++
++#define spin_trylock_irqsave(lock, flags) \
++ rt_spin_trylock_irqsave(lock, &(flags))
++
++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
++
++#ifdef CONFIG_GENERIC_LOCKBREAK
++# define spin_is_contended(lock) ((lock)->break_lock)
++#else
++# define spin_is_contended(lock) (((void)(lock), 0))
++#endif
++
++static inline int spin_can_lock(spinlock_t *lock)
++{
++ return !rt_mutex_is_locked(&lock->lock);
++}
++
++static inline int spin_is_locked(spinlock_t *lock)
++{
++ return rt_mutex_is_locked(&lock->lock);
++}
++
++static inline void assert_spin_locked(spinlock_t *lock)
++{
++ BUG_ON(!spin_is_locked(lock));
++}
++
++#define atomic_dec_and_lock(atomic, lock) \
++ atomic_dec_and_spin_lock(atomic, lock)
++
++#endif
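/*
 * Illustrative sketch (not part of the patch): under RT, spin_lock_irqsave()
 * above only typechecks 'flags' and zeroes it -- interrupts stay enabled and
 * the lock may sleep. Code that genuinely needs IRQs off keeps using
 * raw_spinlock_t:
 */
static DEFINE_SPINLOCK(demo_lock);		/* rt_mutex-backed on RT */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);	/* still a real spinning lock */

static void demo_both(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);		/* flags == 0, IRQs stay on */
	spin_unlock_irqrestore(&demo_lock, flags);

	raw_spin_lock_irqsave(&demo_raw_lock, flags);	/* IRQs really disabled */
	raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}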
+diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
+index 73548eb13a5d..10bac715ea96 100644
+--- a/include/linux/spinlock_types.h
++++ b/include/linux/spinlock_types.h
+@@ -9,80 +9,15 @@
+ * Released under the General Public License (GPL).
+ */
+
+-#if defined(CONFIG_SMP)
+-# include <asm/spinlock_types.h>
++#include <linux/spinlock_types_raw.h>
++
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_types_nort.h>
++# include <linux/rwlock_types.h>
+ #else
+-# include <linux/spinlock_types_up.h>
++# include <linux/rtmutex.h>
++# include <linux/spinlock_types_rt.h>
++# include <linux/rwlock_types_rt.h>
+ #endif
+
+-#include <linux/lockdep.h>
+-
+-typedef struct raw_spinlock {
+- arch_spinlock_t raw_lock;
+-#ifdef CONFIG_GENERIC_LOCKBREAK
+- unsigned int break_lock;
+-#endif
+-#ifdef CONFIG_DEBUG_SPINLOCK
+- unsigned int magic, owner_cpu;
+- void *owner;
+-#endif
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+- struct lockdep_map dep_map;
+-#endif
+-} raw_spinlock_t;
+-
+-#define SPINLOCK_MAGIC 0xdead4ead
+-
+-#define SPINLOCK_OWNER_INIT ((void *)-1L)
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+-#else
+-# define SPIN_DEP_MAP_INIT(lockname)
+-#endif
+-
+-#ifdef CONFIG_DEBUG_SPINLOCK
+-# define SPIN_DEBUG_INIT(lockname) \
+- .magic = SPINLOCK_MAGIC, \
+- .owner_cpu = -1, \
+- .owner = SPINLOCK_OWNER_INIT,
+-#else
+-# define SPIN_DEBUG_INIT(lockname)
+-#endif
+-
+-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+- { \
+- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+- SPIN_DEBUG_INIT(lockname) \
+- SPIN_DEP_MAP_INIT(lockname) }
+-
+-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+-
+-typedef struct spinlock {
+- union {
+- struct raw_spinlock rlock;
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+- struct {
+- u8 __padding[LOCK_PADSIZE];
+- struct lockdep_map dep_map;
+- };
+-#endif
+- };
+-} spinlock_t;
+-
+-#define __SPIN_LOCK_INITIALIZER(lockname) \
+- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+-
+-#define __SPIN_LOCK_UNLOCKED(lockname) \
+- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+-
+-#include <linux/rwlock_types.h>
+-
+ #endif /* __LINUX_SPINLOCK_TYPES_H */
+diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
+new file mode 100644
+index 000000000000..f1dac1fb1d6a
+--- /dev/null
++++ b/include/linux/spinlock_types_nort.h
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
++#define __LINUX_SPINLOCK_TYPES_NORT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * The non-RT version maps spinlocks to raw_spinlocks
++ */
++typedef struct spinlock {
++ union {
++ struct raw_spinlock rlock;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
++ struct {
++ u8 __padding[LOCK_PADSIZE];
++ struct lockdep_map dep_map;
++ };
++#endif
++ };
++} spinlock_t;
++
++#define __SPIN_LOCK_INITIALIZER(lockname) \
++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
++
++#define __SPIN_LOCK_UNLOCKED(lockname) \
++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
++
++#endif
+diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
+new file mode 100644
+index 000000000000..edffc4d53fc9
+--- /dev/null
++++ b/include/linux/spinlock_types_raw.h
+@@ -0,0 +1,56 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
++#define __LINUX_SPINLOCK_TYPES_RAW_H
++
++#if defined(CONFIG_SMP)
++# include <asm/spinlock_types.h>
++#else
++# include <linux/spinlock_types_up.h>
++#endif
++
++#include <linux/lockdep.h>
++
++typedef struct raw_spinlock {
++ arch_spinlock_t raw_lock;
++#ifdef CONFIG_GENERIC_LOCKBREAK
++ unsigned int break_lock;
++#endif
++#ifdef CONFIG_DEBUG_SPINLOCK
++ unsigned int magic, owner_cpu;
++ void *owner;
++#endif
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} raw_spinlock_t;
++
++#define SPINLOCK_MAGIC 0xdead4ead
++
++#define SPINLOCK_OWNER_INIT ((void *)-1L)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define SPIN_DEP_MAP_INIT(lockname)
++#endif
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define SPIN_DEBUG_INIT(lockname) \
++ .magic = SPINLOCK_MAGIC, \
++ .owner_cpu = -1, \
++ .owner = SPINLOCK_OWNER_INIT,
++#else
++# define SPIN_DEBUG_INIT(lockname)
++#endif
++
++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
++ { \
++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
++ SPIN_DEBUG_INIT(lockname) \
++ SPIN_DEP_MAP_INIT(lockname) }
++
++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
++
++#endif
+diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
+new file mode 100644
+index 000000000000..3e3d8c5f7a9a
+--- /dev/null
++++ b/include/linux/spinlock_types_rt.h
+@@ -0,0 +1,48 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RT_H
++#define __LINUX_SPINLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++#include <linux/cache.h>
++
++/*
++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
++ */
++typedef struct spinlock {
++ struct rt_mutex lock;
++ unsigned int break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} spinlock_t;
++
++#ifdef CONFIG_DEBUG_RT_MUTEXES
++# define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++	smp_store_mb(current->state, (state_value))
++ .line = __LINE__ , \
++ }
++#else
++# define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++ }
++#endif
++
++/*
++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
++*/
++
++#define __SPIN_LOCK_UNLOCKED(name) \
++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \
++ SPIN_DEP_MAP_INIT(name) }
++
++#define DEFINE_SPINLOCK(name) \
++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
++
++#endif
+diff --git a/include/linux/srcu.h b/include/linux/srcu.h
+index dc8eb63c6568..e793d3a257da 100644
+--- a/include/linux/srcu.h
++++ b/include/linux/srcu.h
+@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp);
+
+ void process_srcu(struct work_struct *work);
+
+-#define __SRCU_STRUCT_INIT(name) \
++#define __SRCU_STRUCT_INIT(name, pcpu_name) \
+ { \
+ .completed = -300, \
+- .per_cpu_ref = &name##_srcu_array, \
++ .per_cpu_ref = &pcpu_name, \
+ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
+ .running = false, \
+ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
+@@ -119,7 +119,7 @@ void process_srcu(struct work_struct *work);
+ */
+ #define __DEFINE_SRCU(name, is_static) \
+ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
++ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array)
+ #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
+ #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+
+diff --git a/include/linux/suspend.h b/include/linux/suspend.h
+index 7693e39b14fe..b36eedeb28d1 100644
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -193,6 +193,12 @@ struct platform_freeze_ops {
+ void (*end)(void);
+ };
+
++#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
++extern bool pm_in_action;
++#else
++# define pm_in_action false
++#endif
++
+ #ifdef CONFIG_SUSPEND
+ /**
+ * suspend_set_ops - set platform dependent suspend operations
+diff --git a/include/linux/swait.h b/include/linux/swait.h
+index c1f9c62a8a50..83f004a72320 100644
+--- a/include/linux/swait.h
++++ b/include/linux/swait.h
+@@ -87,6 +87,7 @@ static inline int swait_active(struct swait_queue_head *q)
+ extern void swake_up(struct swait_queue_head *q);
+ extern void swake_up_all(struct swait_queue_head *q);
+ extern void swake_up_locked(struct swait_queue_head *q);
++extern void swake_up_all_locked(struct swait_queue_head *q);
+
+ extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+ extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index e1d761463243..4ae9a4434ad3 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -11,6 +11,7 @@
+ #include <linux/fs.h>
+ #include <linux/atomic.h>
+ #include <linux/page-flags.h>
++#include <linux/locallock.h>
+ #include <asm/page.h>
+
+ struct notifier_block;
+@@ -243,7 +244,8 @@ struct swap_info_struct {
+ void *workingset_eviction(struct address_space *mapping, struct page *page);
+ bool workingset_refault(void *shadow);
+ void workingset_activation(struct page *page);
+-extern struct list_lru workingset_shadow_nodes;
++extern struct list_lru __workingset_shadow_nodes;
++DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
+
+ static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
+ {
+@@ -288,6 +290,7 @@ extern unsigned long nr_free_pagecache_pages(void);
+
+
+ /* linux/mm/swap.c */
++DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
+ extern void lru_cache_add(struct page *);
+ extern void lru_cache_add_anon(struct page *page);
+ extern void lru_cache_add_file(struct page *page);
+diff --git a/include/linux/swork.h b/include/linux/swork.h
+new file mode 100644
+index 000000000000..f175fa9a6016
+--- /dev/null
++++ b/include/linux/swork.h
+@@ -0,0 +1,24 @@
++#ifndef _LINUX_SWORK_H
++#define _LINUX_SWORK_H
++
++#include <linux/list.h>
++
++struct swork_event {
++ struct list_head item;
++ unsigned long flags;
++ void (*func)(struct swork_event *);
++};
++
++static inline void INIT_SWORK(struct swork_event *event,
++ void (*func)(struct swork_event *))
++{
++ event->flags = 0;
++ event->func = func;
++}
++
++bool swork_queue(struct swork_event *sev);
++
++int swork_get(void);
++void swork_put(void);
++
++#endif /* _LINUX_SWORK_H */
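/*
 * Illustrative sketch (not part of the patch): the simple-work API above
 * hands callbacks to a dedicated kernel thread so they run in sleepable
 * context. Hypothetical usage:
 */
static void demo_swork_fn(struct swork_event *sev)
{
	/* runs in the swork thread; may take sleeping locks */
}

static struct swork_event demo_event;

static int demo_swork_setup(void)
{
	int ret = swork_get();	/* bring up the worker thread */

	if (ret)
		return ret;
	INIT_SWORK(&demo_event, demo_swork_fn);
	swork_queue(&demo_event);	/* callable from atomic context */
	return 0;
}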
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index 2b5b10eed74f..8bf15b1858f5 100644
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -103,7 +103,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
+ #define test_thread_flag(flag) \
+ test_ti_thread_flag(current_thread_info(), flag)
+
+-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
++#ifdef CONFIG_PREEMPT_LAZY
++#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
++ test_thread_flag(TIF_NEED_RESCHED_LAZY))
++#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
++#define tif_need_resched_lazy()	(test_thread_flag(TIF_NEED_RESCHED_LAZY))
++
++#else
++#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
++#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
++#define tif_need_resched_lazy() 0
++#endif
+
+ #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+ static inline int arch_within_stack_frames(const void * const stack,
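/*
 * Illustrative sketch (not part of the patch): a preemption point under
 * PREEMPT_LAZY distinguishes the two flags -- TIF_NEED_RESCHED must act
 * immediately, TIF_NEED_RESCHED_LAZY may wait for a natural reschedule:
 */
static inline void demo_preempt_check(void)	/* hypothetical */
{
	if (tif_need_resched_now()) {
		/* hard request, e.g. an RT task woke up: preempt at once */
	} else if (tif_need_resched_lazy()) {
		/* lazy request from a SCHED_OTHER wakeup: may be deferred */
	}
}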
+diff --git a/include/linux/timer.h b/include/linux/timer.h
+index 51d601f192d4..83cea629efe1 100644
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer);
+
+ extern int try_to_del_timer_sync(struct timer_list *timer);
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ extern int del_timer_sync(struct timer_list *timer);
+ #else
+ # define del_timer_sync(t) del_timer(t)
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index be007610ceb0..15154b13a53b 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -56,6 +56,9 @@ struct trace_entry {
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
++ unsigned short migrate_disable;
++ unsigned short padding;
++ unsigned char preempt_lazy_count;
+ };
+
+ #define TRACE_EVENT_TYPE_MAX \
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index f30c187ed785..83bf0f798426 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -24,6 +24,7 @@ static __always_inline void pagefault_disabled_dec(void)
+ */
+ static inline void pagefault_disable(void)
+ {
++ migrate_disable();
+ pagefault_disabled_inc();
+ /*
+ * make sure to have issued the store before a pagefault
+@@ -40,6 +41,7 @@ static inline void pagefault_enable(void)
+ */
+ barrier();
+ pagefault_disabled_dec();
++ migrate_enable();
+ }
+
+ /*
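/*
 * Illustrative sketch (not part of the patch): pairing migrate_disable()
 * with pagefault_disable() above keeps the task on one CPU while faults are
 * suppressed, yet the section stays preemptible on RT:
 */
static int demo_peek_user(const void __user *uaddr, int *val)	/* hypothetical */
{
	unsigned long left;

	pagefault_disable();	/* now also migrate-disabled */
	left = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return left ? -EFAULT : 0;
}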
+diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
+index 4a29c75b146e..0a294e950df8 100644
+--- a/include/linux/uprobes.h
++++ b/include/linux/uprobes.h
+@@ -27,6 +27,7 @@
+ #include <linux/errno.h>
+ #include <linux/rbtree.h>
+ #include <linux/types.h>
++#include <linux/wait.h>
+
+ struct vm_area_struct;
+ struct mm_struct;
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index 613771909b6e..e28c5a43229d 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+ */
+ static inline void __count_vm_event(enum vm_event_item item)
+ {
++ preempt_disable_rt();
+ raw_cpu_inc(vm_event_states.event[item]);
++ preempt_enable_rt();
+ }
+
+ static inline void count_vm_event(enum vm_event_item item)
+@@ -43,7 +45,9 @@ static inline void count_vm_event(enum vm_event_item item)
+
+ static inline void __count_vm_events(enum vm_event_item item, long delta)
+ {
++ preempt_disable_rt();
+ raw_cpu_add(vm_event_states.event[item], delta);
++ preempt_enable_rt();
+ }
+
+ static inline void count_vm_events(enum vm_event_item item, long delta)
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index c3ff74d764fa..60222150a409 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -8,6 +8,7 @@
+ #include <linux/spinlock.h>
+ #include <asm/current.h>
+ #include <uapi/linux/wait.h>
++#include <linux/atomic.h>
+
+ typedef struct __wait_queue wait_queue_t;
+ typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 6835d224d47b..55a5a9698f14 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -446,7 +446,7 @@ static inline void dst_confirm(struct dst_entry *dst)
+ static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
+ struct sk_buff *skb)
+ {
+- const struct hh_cache *hh;
++ struct hh_cache *hh;
+
+ if (dst->pending_confirm) {
+ unsigned long now = jiffies;
+diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
+index 231e121cc7d9..d125222b979d 100644
+--- a/include/net/gen_stats.h
++++ b/include/net/gen_stats.h
+@@ -5,6 +5,7 @@
+ #include <linux/socket.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/pkt_sched.h>
++#include <net/net_seq_lock.h>
+
+ struct gnet_stats_basic_cpu {
+ struct gnet_stats_basic_packed bstats;
+@@ -33,11 +34,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
+ spinlock_t *lock, struct gnet_dump *d,
+ int padattr);
+
+-int gnet_stats_copy_basic(const seqcount_t *running,
++int gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
+-void __gnet_stats_copy_basic(const seqcount_t *running,
++void __gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
+@@ -55,14 +56,14 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock,
+- seqcount_t *running, struct nlattr *opt);
++ net_seqlock_t *running, struct nlattr *opt);
+ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_rate_est64 *rate_est);
+ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock,
+- seqcount_t *running, struct nlattr *opt);
++ net_seqlock_t *running, struct nlattr *opt);
+ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
+ const struct gnet_stats_rate_est64 *rate_est);
+ #endif
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 8b683841e574..bf656008f6e7 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -446,7 +446,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
+ }
+ #endif
+
+-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
++static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
+ {
+ unsigned int seq;
+ int hh_len;
+@@ -501,7 +501,7 @@ struct neighbour_cb {
+
+ #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
+
+-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
+ const struct net_device *dev)
+ {
+ unsigned int seq;
+diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h
+new file mode 100644
+index 000000000000..a7034298a82a
+--- /dev/null
++++ b/include/net/net_seq_lock.h
+@@ -0,0 +1,15 @@
++#ifndef __NET_NET_SEQ_LOCK_H__
++#define __NET_NET_SEQ_LOCK_H__
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define net_seqlock_t seqlock_t
++# define net_seq_begin(__r) read_seqbegin(__r)
++# define net_seq_retry(__r, __s) read_seqretry(__r, __s)
++
++#else
++# define net_seqlock_t seqcount_t
++# define net_seq_begin(__r) read_seqcount_begin(__r)
++# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s)
++#endif
++
++#endif
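/*
 * Illustrative sketch (not part of the patch): net_seqlock_t lets the same
 * stats-reader compile against a plain seqcount_t (!RT) or a full seqlock_t
 * (RT, where a blocked reader can boost the writer):
 */
static void demo_copy_stat(net_seqlock_t *running,	/* hypothetical */
			   const u64 *src, u64 *dst)
{
	unsigned int seq;

	do {
		seq = net_seq_begin(running);
		*dst = *src;
	} while (net_seq_retry(running, seq));
}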
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index d061ffeb1e71..12ef433dc3b8 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -70,6 +70,7 @@ struct netns_ipv4 {
+
+ int sysctl_icmp_echo_ignore_all;
+ int sysctl_icmp_echo_ignore_broadcasts;
++ int sysctl_icmp_echo_sysrq;
+ int sysctl_icmp_ignore_bogus_error_responses;
+ int sysctl_icmp_ratelimit;
+ int sysctl_icmp_ratemask;
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 909aff2db2b3..c47219d6e4bc 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -10,6 +10,7 @@
+ #include <linux/dynamic_queue_limits.h>
+ #include <net/gen_stats.h>
+ #include <net/rtnetlink.h>
++#include <net/net_seq_lock.h>
+
+ struct Qdisc_ops;
+ struct qdisc_walker;
+@@ -78,7 +79,7 @@ struct Qdisc {
+ struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
+ struct sk_buff_head q;
+ struct gnet_stats_basic_packed bstats;
+- seqcount_t running;
++ net_seqlock_t running;
+ struct gnet_stats_queue qstats;
+ unsigned long state;
+ struct Qdisc *next_sched;
+@@ -90,13 +91,22 @@ struct Qdisc {
+ spinlock_t busylock ____cacheline_aligned_in_smp;
+ };
+
+-static inline bool qdisc_is_running(const struct Qdisc *qdisc)
++static inline bool qdisc_is_running(struct Qdisc *qdisc)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ return spin_is_locked(&qdisc->running.lock) ? true : false;
++#else
+ return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
++#endif
+ }
+
+ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ if (try_write_seqlock(&qdisc->running))
++ return true;
++ return false;
++#else
+ if (qdisc_is_running(qdisc))
+ return false;
+ /* Variant of write_seqcount_begin() telling lockdep a trylock
+@@ -105,11 +115,16 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ raw_write_seqcount_begin(&qdisc->running);
+ seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
+ return true;
++#endif
+ }
+
+ static inline void qdisc_run_end(struct Qdisc *qdisc)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ write_sequnlock(&qdisc->running);
++#else
+ write_seqcount_end(&qdisc->running);
++#endif
+ }
+
+ static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
+@@ -300,7 +315,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
+ return qdisc_lock(root);
+ }
+
+-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
++static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+ {
+ struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
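/*
 * Illustrative sketch (not part of the patch): callers keep the canonical
 * begin/end pattern; only the implementation of the helpers above changes
 * on RT, where the seqcount dance becomes a seqlock trylock:
 */
static void demo_run_qdisc(struct Qdisc *q)	/* hypothetical wrapper */
{
	if (qdisc_run_begin(q)) {	/* exclusive: seqlock (RT) or seqcount */
		/* ... __qdisc_run(q): dequeue and transmit packets ... */
		qdisc_run_end(q);
	}
	/* else: another CPU is already servicing this qdisc */
}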
+diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h
+new file mode 100644
+index 000000000000..f7710de1b1f3
+--- /dev/null
++++ b/include/trace/events/hist.h
+@@ -0,0 +1,73 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM hist
++
++#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_HIST_H
++
++#include "latency_hist.h"
++#include <linux/tracepoint.h>
++
++#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
++#define trace_preemptirqsoff_hist(a, b)
++#define trace_preemptirqsoff_hist_rcuidle(a, b)
++#else
++TRACE_EVENT(preemptirqsoff_hist,
++
++ TP_PROTO(int reason, int starthist),
++
++ TP_ARGS(reason, starthist),
++
++ TP_STRUCT__entry(
++ __field(int, reason)
++ __field(int, starthist)
++ ),
++
++ TP_fast_assign(
++ __entry->reason = reason;
++ __entry->starthist = starthist;
++ ),
++
++ TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
++ __entry->starthist ? "start" : "stop")
++);
++#endif
++
++#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
++#define trace_hrtimer_interrupt(a, b, c, d)
++#else
++TRACE_EVENT(hrtimer_interrupt,
++
++ TP_PROTO(int cpu, long long offset, struct task_struct *curr,
++ struct task_struct *task),
++
++ TP_ARGS(cpu, offset, curr, task),
++
++ TP_STRUCT__entry(
++ __field(int, cpu)
++ __field(long long, offset)
++ __array(char, ccomm, TASK_COMM_LEN)
++ __field(int, cprio)
++ __array(char, tcomm, TASK_COMM_LEN)
++ __field(int, tprio)
++ ),
++
++ TP_fast_assign(
++ __entry->cpu = cpu;
++ __entry->offset = offset;
++ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
++ __entry->cprio = curr->prio;
++ memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
++ task != NULL ? TASK_COMM_LEN : 7);
++ __entry->tprio = task != NULL ? task->prio : -1;
++ ),
++
++ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
++ __entry->cpu, __entry->offset, __entry->ccomm,
++ __entry->cprio, __entry->tcomm, __entry->tprio)
++);
++#endif
++
++#endif /* _TRACE_HIST_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h
+new file mode 100644
+index 000000000000..d3f2fbd560b1
+--- /dev/null
++++ b/include/trace/events/latency_hist.h
+@@ -0,0 +1,29 @@
++#ifndef _LATENCY_HIST_H
++#define _LATENCY_HIST_H
++
++enum hist_action {
++ IRQS_ON,
++ PREEMPT_ON,
++ TRACE_STOP,
++ IRQS_OFF,
++ PREEMPT_OFF,
++ TRACE_START,
++};
++
++static char *actions[] = {
++ "IRQS_ON",
++ "PREEMPT_ON",
++ "TRACE_STOP",
++ "IRQS_OFF",
++ "PREEMPT_OFF",
++ "TRACE_START",
++};
++
++static inline char *getaction(int action)
++{
++	if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
++ return actions[action];
++ return "unknown";
++}
++
++#endif /* _LATENCY_HIST_H */
+diff --git a/init/Kconfig b/init/Kconfig
+index cac3f096050d..b6c9166d878a 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -496,7 +496,7 @@ config TINY_RCU
+
+ config RCU_EXPERT
+ bool "Make expert-level adjustments to RCU configuration"
+- default n
++ default y if PREEMPT_RT_FULL
+ help
+ This option needs to be enabled if you wish to make
+ expert-level adjustments to RCU configuration. By default,
+@@ -613,7 +613,7 @@ config RCU_FANOUT_LEAF
+
+ config RCU_FAST_NO_HZ
+ bool "Accelerate last non-dyntick-idle CPU's grace periods"
+- depends on NO_HZ_COMMON && SMP && RCU_EXPERT
++ depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL
+ default n
+ help
+ This option permits CPUs to enter dynticks-idle state even if
+@@ -640,7 +640,7 @@ config TREE_RCU_TRACE
+ config RCU_BOOST
+ bool "Enable RCU priority boosting"
+ depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
+- default n
++ default y if PREEMPT_RT_FULL
+ help
+ This option boosts the priority of preempted RCU readers that
+ block the current preemptible RCU grace period for too long.
+@@ -1054,6 +1054,7 @@ config CFS_BANDWIDTH
+ config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on CGROUP_SCHED
++ depends on !PREEMPT_RT_FULL
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
+@@ -1761,6 +1762,7 @@ choice
+
+ config SLAB
+ bool "SLAB"
++ depends on !PREEMPT_RT_FULL
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
+ help
+ The regular slab allocator that is established and known to work
+@@ -1781,6 +1783,7 @@ config SLUB
+ config SLOB
+ depends on EXPERT
+ bool "SLOB (Simple Allocator)"
++ depends on !PREEMPT_RT_FULL
+ help
+ SLOB replaces the stock allocator with a drastically simpler
+ allocator. SLOB is generally more space efficient but
+@@ -1799,7 +1802,7 @@ config SLAB_FREELIST_RANDOM
+
+ config SLUB_CPU_PARTIAL
+ default y
+- depends on SLUB && SMP
++ depends on SLUB && SMP && !PREEMPT_RT_FULL
+ bool "SLUB per cpu partial cache"
+ help
+ Per cpu partial caches accellerate objects allocation and freeing
+diff --git a/init/Makefile b/init/Makefile
+index 7bc47ee31c36..88cf473554e0 100644
+--- a/init/Makefile
++++ b/init/Makefile
+@@ -33,4 +33,4 @@ $(obj)/version.o: include/generated/compile.h
+ include/generated/compile.h: FORCE
+ @$($(quiet)chk_compile.h)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
+diff --git a/init/main.c b/init/main.c
+index a8a58e2794a5..e4c979e37a91 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -507,6 +507,7 @@ asmlinkage __visible void __init start_kernel(void)
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();
++ softirq_early_init();
+ boot_cpu_state_init();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+
+diff --git a/ipc/msg.c b/ipc/msg.c
+index c6521c205cb4..996d89023552 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -183,20 +183,14 @@ static void ss_wakeup(struct list_head *h, int kill)
+ }
+ }
+
+-static void expunge_all(struct msg_queue *msq, int res)
++static void expunge_all(struct msg_queue *msq, int res,
++ struct wake_q_head *wake_q)
+ {
+ struct msg_receiver *msr, *t;
+
+ list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
+- msr->r_msg = NULL; /* initialize expunge ordering */
+- wake_up_process(msr->r_tsk);
+- /*
+- * Ensure that the wakeup is visible before setting r_msg as
+- * the receiving end depends on it: either spinning on a nil,
+- * or dealing with -EAGAIN cases. See lockless receive part 1
+- * and 2 in do_msgrcv().
+- */
+- smp_wmb(); /* barrier (B) */
++
++ wake_q_add(wake_q, msr->r_tsk);
+ msr->r_msg = ERR_PTR(res);
+ }
+ }
+@@ -213,11 +207,13 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+ {
+ struct msg_msg *msg, *t;
+ struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
++ WAKE_Q(wake_q);
+
+- expunge_all(msq, -EIDRM);
++ expunge_all(msq, -EIDRM, &wake_q);
+ ss_wakeup(&msq->q_senders, 1);
+ msg_rmid(ns, msq);
+ ipc_unlock_object(&msq->q_perm);
++ wake_up_q(&wake_q);
+ rcu_read_unlock();
+
+ list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
+@@ -342,6 +338,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
+ struct kern_ipc_perm *ipcp;
+ struct msqid64_ds uninitialized_var(msqid64);
+ struct msg_queue *msq;
++ WAKE_Q(wake_q);
+ int err;
+
+ if (cmd == IPC_SET) {
+@@ -389,7 +386,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
+ /* sleeping receivers might be excluded by
+ * stricter permissions.
+ */
+- expunge_all(msq, -EAGAIN);
++ expunge_all(msq, -EAGAIN, &wake_q);
+ /* sleeping senders might be able to send
+ * due to a larger queue size.
+ */
+@@ -402,6 +399,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
+
+ out_unlock0:
+ ipc_unlock_object(&msq->q_perm);
++ wake_up_q(&wake_q);
+ out_unlock1:
+ rcu_read_unlock();
+ out_up:
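/*
 * Illustrative sketch (not part of the patch): the conversion in this file
 * follows the standard wake_q pattern -- collect wakeups under the IPC lock
 * and issue them only after it is dropped, so a woken receiver cannot
 * immediately block on the lock again (which would mean boost/deboost
 * churn on RT):
 */
static void demo_wake_q_pattern(spinlock_t *lock, struct task_struct *t)
{
	WAKE_Q(wake_q);

	spin_lock(lock);
	wake_q_add(&wake_q, t);	/* only queues the task, no wakeup yet */
	spin_unlock(lock);

	wake_up_q(&wake_q);	/* the actual wake_up_process() happens here */
}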
+@@ -566,7 +564,8 @@ static int testmsg(struct msg_msg *msg, long type, int mode)
+ return 0;
+ }
+
+-static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
++static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
++ struct wake_q_head *wake_q)
+ {
+ struct msg_receiver *msr, *t;
+
+@@ -577,27 +576,13 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
+
+ list_del(&msr->r_list);
+ if (msr->r_maxsize < msg->m_ts) {
+- /* initialize pipelined send ordering */
+- msr->r_msg = NULL;
+- wake_up_process(msr->r_tsk);
+- /* barrier (B) see barrier comment below */
+- smp_wmb();
++ wake_q_add(wake_q, msr->r_tsk);
+ msr->r_msg = ERR_PTR(-E2BIG);
+ } else {
+- msr->r_msg = NULL;
+ msq->q_lrpid = task_pid_vnr(msr->r_tsk);
+ msq->q_rtime = get_seconds();
+- wake_up_process(msr->r_tsk);
+- /*
+- * Ensure that the wakeup is visible before
+- * setting r_msg, as the receiving can otherwise
+- * exit - once r_msg is set, the receiver can
+- * continue. See lockless receive part 1 and 2
+- * in do_msgrcv(). Barrier (B).
+- */
+- smp_wmb();
++ wake_q_add(wake_q, msr->r_tsk);
+ msr->r_msg = msg;
+-
+ return 1;
+ }
+ }
+@@ -613,6 +598,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
+ struct msg_msg *msg;
+ int err;
+ struct ipc_namespace *ns;
++ WAKE_Q(wake_q);
+
+ ns = current->nsproxy->ipc_ns;
+
+@@ -698,7 +684,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
+ msq->q_lspid = task_tgid_vnr(current);
+ msq->q_stime = get_seconds();
+
+- if (!pipelined_send(msq, msg)) {
++ if (!pipelined_send(msq, msg, &wake_q)) {
+ /* no one is waiting for this message, enqueue it */
+ list_add_tail(&msg->m_list, &msq->q_messages);
+ msq->q_cbytes += msgsz;
+@@ -712,6 +698,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
+
+ out_unlock0:
+ ipc_unlock_object(&msq->q_perm);
++ wake_up_q(&wake_q);
+ out_unlock1:
+ rcu_read_unlock();
+ if (msg != NULL)
+@@ -932,57 +919,25 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
+ rcu_read_lock();
+
+ /* Lockless receive, part 2:
+- * Wait until pipelined_send or expunge_all are outside of
+- * wake_up_process(). There is a race with exit(), see
+- * ipc/mqueue.c for the details. The correct serialization
+- * ensures that a receiver cannot continue without the wakeup
+- * being visible _before_ setting r_msg:
++ * The work in pipelined_send() and expunge_all():
++ * - Set pointer to message
++ * - Queue the receiver task for later wakeup
++ * - Wake up the process after the lock is dropped.
+ *
+- * CPU 0 CPU 1
+- * <loop receiver>
+- * smp_rmb(); (A) <-- pair -. <waker thread>
+- * <load ->r_msg> | msr->r_msg = NULL;
+- * | wake_up_process();
+- * <continue> `------> smp_wmb(); (B)
+- * msr->r_msg = msg;
+- *
+- * Where (A) orders the message value read and where (B) orders
+- * the write to the r_msg -- done in both pipelined_send and
+- * expunge_all.
++ * Should the process wake up before this wakeup (due to a
++ * signal) it will either see the message and continue ...
+ */
+- for (;;) {
+- /*
+- * Pairs with writer barrier in pipelined_send
+- * or expunge_all.
+- */
+- smp_rmb(); /* barrier (A) */
+- msg = (struct msg_msg *)msr_d.r_msg;
+- if (msg)
+- break;
+
+- /*
+- * The cpu_relax() call is a compiler barrier
+- * which forces everything in this loop to be
+- * re-loaded.
+- */
+- cpu_relax();
+- }
+-
+- /* Lockless receive, part 3:
+- * If there is a message or an error then accept it without
+- * locking.
+- */
++ msg = (struct msg_msg *)msr_d.r_msg;
+ if (msg != ERR_PTR(-EAGAIN))
+ goto out_unlock1;
+
+- /* Lockless receive, part 3:
+- * Acquire the queue spinlock.
+- */
++ /*
++ * ... or see -EAGAIN, acquire the lock to check the message
++ * again.
++ */
+ ipc_lock_object(&msq->q_perm);
+
+- /* Lockless receive, part 4:
+- * Repeat test after acquiring the spinlock.
+- */
+ msg = (struct msg_msg *)msr_d.r_msg;
+ if (msg != ERR_PTR(-EAGAIN))
+ goto out_unlock0;
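
The hunks above replace the wake_up_process()/smp_wmb() dance with the
wake_q mechanism: wakeups are collected under the queue lock and issued
only after it is dropped, so a woken receiver never blocks right back on
the still-held lock (a priority-inversion hazard on RT). A minimal sketch
of the pattern; my_waiter, w->result and my_expunge() are illustrative
names, not kernel symbols:

struct my_waiter {
	struct list_head list;
	struct task_struct *task;
	void *result;
};

static void my_expunge(struct list_head *waiters, spinlock_t *lock)
{
	WAKE_Q(wake_q);				/* on-stack wake queue (4.8 API) */
	struct my_waiter *w, *t;

	spin_lock(lock);
	list_for_each_entry_safe(w, t, waiters, list) {
		wake_q_add(&wake_q, w->task);	/* takes a task reference */
		w->result = ERR_PTR(-EIDRM);	/* publish the result */
	}
	spin_unlock(lock);
	wake_up_q(&wake_q);			/* wakeups run lock-free */
}
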
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 5e318c5f749d..ec9203971539 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -712,6 +712,13 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
+ static void wake_up_sem_queue_prepare(struct list_head *pt,
+ struct sem_queue *q, int error)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct task_struct *p = q->sleeper;
++ get_task_struct(p);
++ q->status = error;
++ wake_up_process(p);
++ put_task_struct(p);
++#else
+ if (list_empty(pt)) {
+ /*
+ * Hold preempt off so that we don't get preempted and have the
+@@ -723,6 +730,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
+ q->pid = error;
+
+ list_add_tail(&q->list, pt);
++#endif
+ }
+
+ /**
+@@ -736,6 +744,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
+ */
+ static void wake_up_sem_queue_do(struct list_head *pt)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ struct sem_queue *q, *t;
+ int did_something;
+
+@@ -748,6 +757,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
+ }
+ if (did_something)
+ preempt_enable();
++#endif
+ }
+
+ static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
+index ebdb0043203a..b9e6aa7e5aa6 100644
+--- a/kernel/Kconfig.locks
++++ b/kernel/Kconfig.locks
+@@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW
+
+ config MUTEX_SPIN_ON_OWNER
+ def_bool y
+- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+
+ config RWSEM_SPIN_ON_OWNER
+ def_bool y
+- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+
+ config LOCK_SPIN_ON_OWNER
+ def_bool y
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index 3f9c97419f02..11dbe26a8279 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -1,3 +1,16 @@
++config PREEMPT
++ bool
++ select PREEMPT_COUNT
++
++config PREEMPT_RT_BASE
++ bool
++ select PREEMPT
++
++config HAVE_PREEMPT_LAZY
++ bool
++
++config PREEMPT_LAZY
++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
+
+ choice
+ prompt "Preemption Model"
+@@ -33,9 +46,9 @@ config PREEMPT_VOLUNTARY
+
+ Select this if you are building a kernel for a desktop system.
+
+-config PREEMPT
++config PREEMPT__LL
+ bool "Preemptible Kernel (Low-Latency Desktop)"
+- select PREEMPT_COUNT
++ select PREEMPT
+ select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
+ help
+ This option reduces the latency of the kernel by making
+@@ -52,6 +65,22 @@ config PREEMPT
+ embedded system with latency requirements in the milliseconds
+ range.
+
++config PREEMPT_RTB
++ bool "Preemptible Kernel (Basic RT)"
++ select PREEMPT_RT_BASE
++ help
++ This option is basically the same as (Low-Latency Desktop) but
++ enables changes which are preliminary for the full preemptible
++ RT kernel.
++
++config PREEMPT_RT_FULL
++ bool "Fully Preemptible Kernel (RT)"
++ depends on IRQ_FORCED_THREADING
++ select PREEMPT_RT_BASE
++ select PREEMPT_RCU
++ help
++ All and everything
++
+ endchoice
+
+ config PREEMPT_COUNT
+diff --git a/kernel/Makefile b/kernel/Makefile
+index e2ec54e2b952..bff8214bf5f6 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -11,6 +11,13 @@ obj-y = fork.o exec_domain.o panic.o \
+ notifier.o ksysfs.o cred.o reboot.o \
+ async.o range.o smpboot.o
+
++# Tracing may do some dangerous __builtin_return_address() operations
++# We know they are dangerous, we don't need gcc telling us that.
++ifdef CONFIG_USING_GET_LOCK_PARENT_IP
++FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
++KBUILD_CFLAGS += $(FRAME_CFLAGS)
++endif
++
+ obj-$(CONFIG_MULTIUSER) += groups.o
+
+ ifdef CONFIG_FUNCTION_TRACER
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index d6b729beba49..11d61b2ca938 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -5027,10 +5027,10 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
+ queue_work(cgroup_destroy_wq, &css->destroy_work);
+ }
+
+-static void css_release_work_fn(struct work_struct *work)
++static void css_release_work_fn(struct swork_event *sev)
+ {
+ struct cgroup_subsys_state *css =
+- container_of(work, struct cgroup_subsys_state, destroy_work);
++ container_of(sev, struct cgroup_subsys_state, destroy_swork);
+ struct cgroup_subsys *ss = css->ss;
+ struct cgroup *cgrp = css->cgroup;
+
+@@ -5071,8 +5071,8 @@ static void css_release(struct percpu_ref *ref)
+ struct cgroup_subsys_state *css =
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+- INIT_WORK(&css->destroy_work, css_release_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ INIT_SWORK(&css->destroy_swork, css_release_work_fn);
++ swork_queue(&css->destroy_swork);
+ }
+
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -5716,6 +5716,7 @@ static int __init cgroup_wq_init(void)
+ */
+ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+ BUG_ON(!cgroup_destroy_wq);
++ BUG_ON(swork_get());
+
+ /*
+ * Used to destroy pidlists and separate to serve as flush domain.
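
css_release_work_fn() moves from the regular workqueue to the RT patch's
"simple work" (swork) facility, whose callbacks run in a dedicated, fully
preemptible kthread. A hedged sketch of that API as used above (INIT_SWORK,
swork_queue() and swork_get() are added elsewhere in this patch set;
my_event and my_event_fn are illustrative):

static struct swork_event my_event;

static void my_event_fn(struct swork_event *sev)
{
	/* runs in the swork kthread, fully preemptible */
}

static int __init my_setup(void)
{
	int err = swork_get();		/* bring up the worker thread once */

	if (err)
		return err;
	INIT_SWORK(&my_event, my_event_fn);
	swork_queue(&my_event);		/* usable where workqueues are
					 * awkward on RT */
	return 0;
}
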
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 341bf80f80bd..b575429a8a00 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -152,8 +152,8 @@ static struct {
+ #endif
+ } cpu_hotplug = {
+ .active_writer = NULL,
+- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
+ .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
++ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ .dep_map = {.name = "cpu_hotplug.lock" },
+ #endif
+@@ -166,6 +166,289 @@ static struct {
+ #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
+ #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
+
++/**
++ * hotplug_pcp - per cpu hotplug descriptor
++ * @unplug: set when pin_current_cpu() needs to sync tasks
++ * @sync_tsk: the task that waits for tasks to finish pinned sections
++ * @refcount: counter of tasks in pinned sections
++ * @grab_lock: set when the tasks entering pinned sections should wait
++ * @synced: notifier for @sync_tsk to tell cpu_down it's finished
++ * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
++ * @mutex_init: zero if the mutex hasn't been initialized yet.
++ *
++ * Although @unplug and @sync_tsk may point to the same task, the @unplug
++ * is used as a flag and still exists after @sync_tsk has exited and
++ * @sync_tsk is set to NULL.
++ */
++struct hotplug_pcp {
++ struct task_struct *unplug;
++ struct task_struct *sync_tsk;
++ int refcount;
++ int grab_lock;
++ struct completion synced;
++ struct completion unplug_wait;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * Note, on PREEMPT_RT, the hotplug lock must save the state of
++ * the task, otherwise the mutex will cause the task to fail
++ * to sleep when required. (Because it's called from migrate_disable())
++ *
++ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
++ * state.
++ */
++ spinlock_t lock;
++#else
++ struct mutex mutex;
++#endif
++ int mutex_init;
++};
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
++# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
++#else
++# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
++# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
++#endif
++
++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
++
++/**
++ * pin_current_cpu - Prevent the current cpu from being unplugged
++ *
++ * Lightweight version of get_online_cpus() to prevent cpu from being
++ * unplugged when code runs in a migration disabled region.
++ *
++ * Must be called with preemption disabled (preempt_count = 1)!
++ */
++void pin_current_cpu(void)
++{
++ struct hotplug_pcp *hp;
++ int force = 0;
++
++retry:
++ hp = this_cpu_ptr(&hotplug_pcp);
++
++ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
++ hp->unplug == current) {
++ hp->refcount++;
++ return;
++ }
++ if (hp->grab_lock) {
++ preempt_enable();
++ hotplug_lock(hp);
++ hotplug_unlock(hp);
++ } else {
++ preempt_enable();
++ /*
++ * Try to push this task off of this CPU.
++ */
++ if (!migrate_me()) {
++ preempt_disable();
++ hp = this_cpu_ptr(&hotplug_pcp);
++ if (!hp->grab_lock) {
++ /*
++ * Just let it continue, it's already pinned
++ * or about to sleep.
++ */
++ force = 1;
++ goto retry;
++ }
++ preempt_enable();
++ }
++ }
++ preempt_disable();
++ goto retry;
++}
++
++/**
++ * unpin_current_cpu - Allow unplug of current cpu
++ *
++ * Must be called with preemption or interrupts disabled!
++ */
++void unpin_current_cpu(void)
++{
++ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
++
++ WARN_ON(hp->refcount <= 0);
++
++ /* This is safe. sync_unplug_thread is pinned to this cpu */
++ if (!--hp->refcount && hp->unplug && hp->unplug != current)
++ wake_up_process(hp->unplug);
++}
++
++static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
++{
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (hp->refcount) {
++ schedule_preempt_disabled();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++}
++
++static int sync_unplug_thread(void *data)
++{
++ struct hotplug_pcp *hp = data;
++
++ wait_for_completion(&hp->unplug_wait);
++ preempt_disable();
++ hp->unplug = current;
++ wait_for_pinned_cpus(hp);
++
++ /*
++ * This thread will synchronize the cpu_down() with threads
++ * that have pinned the CPU. When the pinned CPU count reaches
++ * zero, we inform the cpu_down code to continue to the next step.
++ */
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ preempt_enable();
++ complete(&hp->synced);
++
++ /*
++ * If all succeeds, the next step will need tasks to wait till
++ * the CPU is offline before continuing. To do this, the grab_lock
++ * is set and tasks going into pin_current_cpu() will block on the
++ * mutex. But we still need to wait for those that are already in
++ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
++ * will kick this thread out.
++ */
++ while (!hp->grab_lock && !kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++
++ /* Make sure grab_lock is seen before we see a stale completion */
++ smp_mb();
++
++ /*
++ * Now just before cpu_down() enters stop machine, we need to make
++ * sure all tasks that are in pinned CPU sections are out, and new
++ * tasks will now grab the lock, keeping them from entering pinned
++ * CPU sections.
++ */
++ if (!kthread_should_stop()) {
++ preempt_disable();
++ wait_for_pinned_cpus(hp);
++ preempt_enable();
++ complete(&hp->synced);
++ }
++
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++ set_current_state(TASK_RUNNING);
++
++ /*
++ * Force this thread off this CPU as it's going down and
++ * we don't want any more work on this CPU.
++ */
++ current->flags &= ~PF_NO_SETAFFINITY;
++ set_cpus_allowed_ptr(current, cpu_present_mask);
++ migrate_me();
++ return 0;
++}
++
++static void __cpu_unplug_sync(struct hotplug_pcp *hp)
++{
++ wake_up_process(hp->sync_tsk);
++ wait_for_completion(&hp->synced);
++}
++
++static void __cpu_unplug_wait(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ complete(&hp->unplug_wait);
++ wait_for_completion(&hp->synced);
++}
++
++/*
++ * Start the sync_unplug_thread on the target cpu and wait for it to
++ * complete.
++ */
++static int cpu_unplug_begin(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++ int err;
++
++ /* Protected by cpu_hotplug.lock */
++ if (!hp->mutex_init) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ spin_lock_init(&hp->lock);
++#else
++ mutex_init(&hp->mutex);
++#endif
++ hp->mutex_init = 1;
++ }
++
++ /* Inform the scheduler to migrate tasks off this CPU */
++ tell_sched_cpu_down_begin(cpu);
++
++ init_completion(&hp->synced);
++ init_completion(&hp->unplug_wait);
++
++ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
++ if (IS_ERR(hp->sync_tsk)) {
++ err = PTR_ERR(hp->sync_tsk);
++ hp->sync_tsk = NULL;
++ return err;
++ }
++ kthread_bind(hp->sync_tsk, cpu);
++
++ /*
++ * Wait for tasks to get out of the pinned sections,
++ * it's still OK if new tasks enter. Some CPU notifiers will
++ * wait for tasks that are going to enter these sections and
++ * we must not have them block.
++ */
++ wake_up_process(hp->sync_tsk);
++ return 0;
++}
++
++static void cpu_unplug_sync(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ init_completion(&hp->synced);
++ /* The completion needs to be initialized before setting grab_lock */
++ smp_wmb();
++
++ /* Grab the mutex before setting grab_lock */
++ hotplug_lock(hp);
++ hp->grab_lock = 1;
++
++ /*
++ * The CPU notifiers have been completed.
++ * Wait for tasks to get out of pinned CPU sections and have new
++ * tasks block until the CPU is completely down.
++ */
++ __cpu_unplug_sync(hp);
++
++ /* All done with the sync thread */
++ kthread_stop(hp->sync_tsk);
++ hp->sync_tsk = NULL;
++}
++
++static void cpu_unplug_done(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ hp->unplug = NULL;
++ /* Let all tasks know cpu unplug is finished before cleaning up */
++ smp_wmb();
++
++ if (hp->sync_tsk)
++ kthread_stop(hp->sync_tsk);
++
++ if (hp->grab_lock) {
++ hotplug_unlock(hp);
++ /* protected by cpu_hotplug.lock */
++ hp->grab_lock = 0;
++ }
++ tell_sched_cpu_down_done(cpu);
++}
+
+ void get_online_cpus(void)
+ {
+@@ -710,10 +993,14 @@ static int takedown_cpu(unsigned int cpu)
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ int err;
+
++ __cpu_unplug_wait(cpu);
+ /* Park the smpboot threads */
+ kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
+ smpboot_park_threads(cpu);
+
++ /* Notifiers are done. Don't let any more tasks pin this CPU. */
++ cpu_unplug_sync(cpu);
++
+ /*
+ * Prevent irq alloc/free while the dying cpu reorganizes the
+ * interrupt affinities.
+@@ -799,6 +1086,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ int prev_state, ret = 0;
+ bool hasdied = false;
++ int mycpu;
++ cpumask_var_t cpumask;
++ cpumask_var_t cpumask_org;
+
+ if (num_online_cpus() == 1)
+ return -EBUSY;
+@@ -806,7 +1096,34 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+ if (!cpu_present(cpu))
+ return -EINVAL;
+
++ /* Move the downtaker off the unplug cpu */
++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
++ return -ENOMEM;
++ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
++ free_cpumask_var(cpumask);
++ return -ENOMEM;
++ }
++
++ cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
++ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
++ set_cpus_allowed_ptr(current, cpumask);
++ free_cpumask_var(cpumask);
++ migrate_disable();
++ mycpu = smp_processor_id();
++ if (mycpu == cpu) {
++ printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
++ migrate_enable();
++ ret = -EBUSY;
++ goto restore_cpus;
++ }
++
++ migrate_enable();
+ cpu_hotplug_begin();
++ ret = cpu_unplug_begin(cpu);
++ if (ret) {
++ printk("cpu_unplug_begin(%d) failed\n", cpu);
++ goto out_cancel;
++ }
+
+ cpuhp_tasks_frozen = tasks_frozen;
+
+@@ -845,10 +1162,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+
+ hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
+ out:
++ cpu_unplug_done(cpu);
++out_cancel:
+ cpu_hotplug_done();
+ /* This post dead nonsense must die */
+ if (!ret && hasdied)
+ cpu_notify_nofail(CPU_POST_DEAD, cpu);
++restore_cpus:
++ set_cpus_allowed_ptr(current, cpumask_org);
++ free_cpumask_var(cpumask_org);
+ return ret;
+ }
+
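
pin_current_cpu()/unpin_current_cpu() give migrate_disable() a lightweight
way to hold off CPU unplug without the full get_online_cpus() machinery;
cpu_down() then waits in sync_unplug_thread() until all pinned sections
have drained. Roughly the expected calling pattern (a sketch; in the RT
tree these calls are issued from migrate_disable()/migrate_enable()
themselves):

static void my_pinned_section(void)
{
	preempt_disable();
	pin_current_cpu();	/* cpu_down() must now wait for us */
	preempt_enable();

	/* migration-disabled region: this CPU cannot be unplugged */

	preempt_disable();
	unpin_current_cpu();	/* may wake the sync_unplug/N thread */
	preempt_enable();
}
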
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index fc1ef736253c..83c666537a7a 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
+ int linecount;
+ int colcount;
+ int logging, saved_loglevel = 0;
+- int saved_trap_printk;
+ int got_printf_lock = 0;
+ int retlen = 0;
+ int fnd, len;
+@@ -565,8 +564,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
+ unsigned long uninitialized_var(flags);
+
+ preempt_disable();
+- saved_trap_printk = kdb_trap_printk;
+- kdb_trap_printk = 0;
+
+ /* Serialize kdb_printf if multiple cpus try to write at once.
+ * But if any cpu goes recursive in kdb, just print the output,
+@@ -855,7 +852,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
+ } else {
+ __release(kdb_printf_lock);
+ }
+- kdb_trap_printk = saved_trap_printk;
+ preempt_enable();
+ return retlen;
+ }
+@@ -865,9 +861,11 @@ int kdb_printf(const char *fmt, ...)
+ va_list ap;
+ int r;
+
++ kdb_trap_printk++;
+ va_start(ap, fmt);
+ r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
+ va_end(ap);
++ kdb_trap_printk--;
+
+ return r;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index fc9bb2225291..bc2db7e1ae04 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1042,6 +1042,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
+ raw_spin_lock_init(&cpuctx->hrtimer_lock);
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+ timer->function = perf_mux_hrtimer_handler;
++ timer->irqsafe = 1;
+ }
+
+ static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
+@@ -8215,6 +8216,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
+
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swevent_hrtimer;
++ hwc->hrtimer.irqsafe = 1;
+
+ /*
+ * Since hrtimers have a fixed rate, we can do a static freq->period
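
On PREEMPT_RT_FULL, hrtimer expiry is normally deferred to the softirq
thread; setting ->irqsafe (a field this patch set adds to struct hrtimer)
keeps a timer in hard interrupt context, which the perf mux and swevent
timers above need. A hedged sketch of marking one's own timer the same way:

static struct hrtimer my_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* hard-irq context even on RT: no sleeping locks in here */
	return HRTIMER_NORESTART;
}

static void my_timer_init(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_timer_fn;
	my_timer.irqsafe = 1;	/* expire from hard irq, not softirq */
}
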
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 091a78be3b09..170b672bbb38 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
+ * Do this under ->siglock, we can race with another thread
+ * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
+ */
+- flush_sigqueue(&tsk->pending);
++ flush_task_sigqueue(tsk);
+ tsk->sighand = NULL;
+ spin_unlock(&sighand->siglock);
+
+diff --git a/kernel/fork.c b/kernel/fork.c
+index beb31725f7e2..e398cb9e62fa 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -251,7 +251,9 @@ static inline void put_signal_struct(struct signal_struct *sig)
+ if (atomic_dec_and_test(&sig->sigcnt))
+ free_signal_struct(sig);
+ }
+-
++#ifdef CONFIG_PREEMPT_RT_BASE
++static
++#endif
+ void __put_task_struct(struct task_struct *tsk)
+ {
+ WARN_ON(!tsk->exit_state);
+@@ -268,7 +270,18 @@ void __put_task_struct(struct task_struct *tsk)
+ if (!profile_handoff_task(tsk))
+ free_task(tsk);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ EXPORT_SYMBOL_GPL(__put_task_struct);
++#else
++void __put_task_struct_cb(struct rcu_head *rhp)
++{
++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
++
++ __put_task_struct(tsk);
++
++}
++EXPORT_SYMBOL_GPL(__put_task_struct_cb);
++#endif
+
+ void __init __weak arch_task_cache_init(void) { }
+
+@@ -702,6 +715,19 @@ void __mmdrop(struct mm_struct *mm)
+ }
+ EXPORT_SYMBOL_GPL(__mmdrop);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++/*
++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
++ * want another facility to make this work.
++ */
++void __mmdrop_delayed(struct rcu_head *rhp)
++{
++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
++
++ __mmdrop(mm);
++}
++#endif
++
+ static inline void __mmput(struct mm_struct *mm)
+ {
+ VM_BUG_ON(atomic_read(&mm->mm_users));
+@@ -1274,6 +1300,9 @@ static void rt_mutex_init_task(struct task_struct *p)
+ */
+ static void posix_cpu_timers_init(struct task_struct *tsk)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ tsk->posix_timer_list = NULL;
++#endif
+ tsk->cputime_expires.prof_exp = 0;
+ tsk->cputime_expires.virt_exp = 0;
+ tsk->cputime_expires.sched_exp = 0;
+@@ -1399,6 +1428,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ spin_lock_init(&p->alloc_lock);
+
+ init_sigpending(&p->pending);
++ p->sigqueue_cache = NULL;
+
+ p->utime = p->stime = p->gtime = 0;
+ p->utimescaled = p->stimescaled = 0;
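
Both deferrals above follow the same idea: the final __put_task_struct()
or __mmdrop() may be reached from atomic context, where freeing through
sleeping locks is not allowed on RT, so the last reference drop is punted
to RCU. A hedged sketch of how the mm side is meant to be driven
(my_mmdrop() is illustrative; delayed_drop is the rcu_head this patch set
adds to struct mm_struct):

static inline void my_mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
#ifdef CONFIG_PREEMPT_RT_BASE
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
#else
		__mmdrop(mm);
#endif
	}
}
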
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 46cb3a301bc1..6de82b959729 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -895,7 +895,9 @@ void exit_pi_state_list(struct task_struct *curr)
+ * task still owns the PI-state:
+ */
+ if (head->next != next) {
++ raw_spin_unlock_irq(&curr->pi_lock);
+ spin_unlock(&hb->lock);
++ raw_spin_lock_irq(&curr->pi_lock);
+ continue;
+ }
+
+@@ -1290,6 +1292,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ struct futex_pi_state *pi_state = this->pi_state;
+ u32 uninitialized_var(curval), newval;
+ WAKE_Q(wake_q);
++ WAKE_Q(wake_sleeper_q);
+ bool deboost;
+ int ret = 0;
+
+@@ -1356,7 +1359,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+
+- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
++ &wake_sleeper_q);
+
+ /*
+ * First unlock HB so the waiter does not spin on it once he got woken
+@@ -1364,8 +1368,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ * deboost first (and lose our higher priority), then the task might get
+ * scheduled away before the wake up can take place.
+ */
+- spin_unlock(&hb->lock);
++ deboost |= spin_unlock_no_deboost(&hb->lock);
+ wake_up_q(&wake_q);
++ wake_up_q_sleeper(&wake_sleeper_q);
+ if (deboost)
+ rt_mutex_adjust_prio(current);
+
+@@ -1915,6 +1920,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ requeue_pi_wake_futex(this, &key2, hb2);
+ drop_count++;
+ continue;
++ } else if (ret == -EAGAIN) {
++ /*
++ * Waiter was woken by timeout or
++ * signal and has set pi_blocked_on to
++ * PI_WAKEUP_INPROGRESS before we
++ * tried to enqueue it on the rtmutex.
++ */
++ this->pi_state = NULL;
++ put_pi_state(pi_state);
++ continue;
+ } else if (ret) {
+ /*
+ * rt_mutex_start_proxy_lock() detected a
+@@ -2805,7 +2820,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ struct hrtimer_sleeper timeout, *to = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct rt_mutex *pi_mutex = NULL;
+- struct futex_hash_bucket *hb;
++ struct futex_hash_bucket *hb, *hb2;
+ union futex_key key2 = FUTEX_KEY_INIT;
+ struct futex_q q = futex_q_init;
+ int res, ret;
+@@ -2830,10 +2845,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * The waiter is allocated on our stack, manipulated by the requeue
+ * code while we sleep on uaddr.
+ */
+- debug_rt_mutex_init_waiter(&rt_waiter);
+- RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
+- RB_CLEAR_NODE(&rt_waiter.tree_entry);
+- rt_waiter.task = NULL;
++ rt_mutex_init_waiter(&rt_waiter, false);
+
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
+ if (unlikely(ret != 0))
+@@ -2864,20 +2876,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+- spin_lock(&hb->lock);
+- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+- spin_unlock(&hb->lock);
+- if (ret)
+- goto out_put_keys;
++ /*
++ * On RT we must avoid races with requeue and trying to block
++ * on two mutexes (hb->lock and uaddr2's rtmutex) by
++ * serializing access to pi_blocked_on with pi_lock.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ if (current->pi_blocked_on) {
++ /*
++ * We have been requeued or are in the process of
++ * being requeued.
++ */
++ raw_spin_unlock_irq(&current->pi_lock);
++ } else {
++ /*
++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
++ * prevents a concurrent requeue from moving us to the
++ * uaddr2 rtmutex. After that we can safely acquire
++ * (and possibly block on) hb->lock.
++ */
++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ spin_lock(&hb->lock);
++
++ /*
++ * Clean up pi_blocked_on. We might leak it otherwise
++ * when we succeeded with the hb->lock in the fast
++ * path.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ current->pi_blocked_on = NULL;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
++ spin_unlock(&hb->lock);
++ if (ret)
++ goto out_put_keys;
++ }
+
+ /*
+- * In order for us to be here, we know our q.key == key2, and since
+- * we took the hb->lock above, we also know that futex_requeue() has
+- * completed and we no longer have to concern ourselves with a wakeup
+- * race with the atomic proxy lock acquisition by the requeue code. The
+- * futex_requeue dropped our key1 reference and incremented our key2
+- * reference count.
++ * In order to be here, we have either been requeued, are in
++ * the process of being requeued, or requeue successfully
++ * acquired uaddr2 on our behalf. If pi_blocked_on was
++ * non-null above, we may be racing with a requeue. Do not
++ * rely on q->lock_ptr to be hb2->lock until after blocking on
++ * hb->lock or hb2->lock. The futex_requeue dropped our key1
++ * reference and incremented our key2 reference count.
+ */
++ hb2 = hash_futex(&key2);
+
+ /* Check if the requeue code acquired the second futex for us. */
+ if (!q.rt_waiter) {
+@@ -2886,14 +2933,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (q.pi_state && (q.pi_state->owner != current)) {
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+ */
+ put_pi_state(q.pi_state);
+- spin_unlock(q.lock_ptr);
++ spin_unlock(&hb2->lock);
+ }
+ } else {
+ /*
+@@ -2906,7 +2954,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
+ * haven't already.
+diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
+index d3f24905852c..f87aa8fdcc51 100644
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -181,10 +181,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+ {
+ irqreturn_t retval;
+ unsigned int flags = 0;
++ struct pt_regs *regs = get_irq_regs();
++ u64 ip = regs ? instruction_pointer(regs) : 0;
+
+ retval = __handle_irq_event_percpu(desc, &flags);
+
+- add_interrupt_randomness(desc->irq_data.irq, flags);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ desc->random_ip = ip;
++#else
++ add_interrupt_randomness(desc->irq_data.irq, flags, ip);
++#endif
+
+ if (!noirqdebug)
+ note_interrupt(desc, retval);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 9530fcd27704..fadf8f848299 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -22,6 +22,7 @@
+ #include "internals.h"
+
+ #ifdef CONFIG_IRQ_FORCED_THREADING
++# ifndef CONFIG_PREEMPT_RT_BASE
+ __read_mostly bool force_irqthreads;
+
+ static int __init setup_forced_irqthreads(char *arg)
+@@ -30,6 +31,7 @@ static int __init setup_forced_irqthreads(char *arg)
+ return 0;
+ }
+ early_param("threadirqs", setup_forced_irqthreads);
++# endif
+ #endif
+
+ static void __synchronize_hardirq(struct irq_desc *desc)
+@@ -233,7 +235,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+
+ if (desc->affinity_notify) {
+ kref_get(&desc->affinity_notify->kref);
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++ swork_queue(&desc->affinity_notify->swork);
++#else
+ schedule_work(&desc->affinity_notify->work);
++#endif
+ }
+ irqd_set(data, IRQD_AFFINITY_SET);
+
+@@ -271,10 +278,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+ }
+ EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
+-static void irq_affinity_notify(struct work_struct *work)
++static void _irq_affinity_notify(struct irq_affinity_notify *notify)
+ {
+- struct irq_affinity_notify *notify =
+- container_of(work, struct irq_affinity_notify, work);
+ struct irq_desc *desc = irq_to_desc(notify->irq);
+ cpumask_var_t cpumask;
+ unsigned long flags;
+@@ -296,6 +301,35 @@ static void irq_affinity_notify(struct work_struct *work)
+ kref_put(&notify->kref, notify->release);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void init_helper_thread(void)
++{
++ static int init_sworker_once;
++
++ if (init_sworker_once)
++ return;
++ if (WARN_ON(swork_get()))
++ return;
++ init_sworker_once = 1;
++}
++
++static void irq_affinity_notify(struct swork_event *swork)
++{
++ struct irq_affinity_notify *notify =
++ container_of(swork, struct irq_affinity_notify, swork);
++ _irq_affinity_notify(notify);
++}
++
++#else
++
++static void irq_affinity_notify(struct work_struct *work)
++{
++ struct irq_affinity_notify *notify =
++ container_of(work, struct irq_affinity_notify, work);
++ _irq_affinity_notify(notify);
++}
++#endif
++
+ /**
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes
+ * @irq: Interrupt for which to enable/disable notification
+@@ -324,7 +358,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+ if (notify) {
+ notify->irq = irq;
+ kref_init(&notify->kref);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ INIT_SWORK(&notify->swork, irq_affinity_notify);
++ init_helper_thread();
++#else
+ INIT_WORK(&notify->work, irq_affinity_notify);
++#endif
+ }
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+@@ -879,7 +918,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+ local_bh_disable();
+ ret = action->thread_fn(action->irq, action->dev_id);
+ irq_finalize_oneshot(desc, action);
+- local_bh_enable();
++ /*
++ * Interrupts which have real time requirements can be set up
++ * to avoid softirq processing in the thread handler. This is
++ * safe as these interrupts do not raise soft interrupts.
++ */
++ if (irq_settings_no_softirq_call(desc))
++ _local_bh_enable();
++ else
++ local_bh_enable();
+ return ret;
+ }
+
+@@ -976,6 +1023,12 @@ static int irq_thread(void *data)
+ if (action_ret == IRQ_WAKE_THREAD)
+ irq_wake_secondary(desc, action);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_disable();
++ add_interrupt_randomness(action->irq, 0,
++ desc->random_ip ^ (unsigned long) action);
++ migrate_enable();
++#endif
+ wake_threads_waitq(desc);
+ }
+
+@@ -1336,6 +1389,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ }
+
++ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
++ irq_settings_set_no_softirq_call(desc);
++
+ /* Set default affinity mask once everything is setup */
+ setup_affinity(desc, mask);
+
+@@ -2061,7 +2117,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
+ * This call sets the internal irqchip state of an interrupt,
+ * depending on the value of @which.
+ *
+- * This function should be called with preemption disabled if the
++ * This function should be called with migration disabled if the
+ * interrupt controller has per-cpu registers.
+ */
+ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
+index 320579d89091..2df2d4445b1e 100644
+--- a/kernel/irq/settings.h
++++ b/kernel/irq/settings.h
+@@ -16,6 +16,7 @@ enum {
+ _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
+ _IRQ_IS_POLLED = IRQ_IS_POLLED,
+ _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
++ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
+ _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
+ };
+
+@@ -30,6 +31,7 @@ enum {
+ #define IRQ_PER_CPU_DEVID GOT_YOU_MORON
+ #define IRQ_IS_POLLED GOT_YOU_MORON
+ #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
++#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
+ #undef IRQF_MODIFY_MASK
+ #define IRQF_MODIFY_MASK GOT_YOU_MORON
+
+@@ -40,6 +42,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
+ desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
+ }
+
++static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
++{
++ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
++}
++
++static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
++{
++ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
++}
++
+ static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+ {
+ return desc->status_use_accessors & _IRQ_PER_CPU;
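
With the settings plumbing above, a driver whose threaded handler is known
never to raise softirqs can pass IRQF_NO_SOFTIRQ_CALL (defined elsewhere
in this patch set), so the irq thread finishes with _local_bh_enable() and
skips softirq processing. A hedged sketch:

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	/* must not raise softirqs: they would not be processed here */
	return IRQ_HANDLED;
}

static int my_request(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, NULL, my_thread_fn,
				    IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
				    "my-dev", dev);
}
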
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index 5707f97a3e6a..73f38dc7a7fb 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
+
+ static int __init irqfixup_setup(char *str)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
++ return 1;
++#endif
+ irqfixup = 1;
+ printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
+ printk(KERN_WARNING "This may impact system performance.\n");
+@@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644);
+
+ static int __init irqpoll_setup(char *str)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
++ return 1;
++#endif
+ irqfixup = 2;
+ printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
+ "enabled\n");
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index bcf107ce0854..2899ba0d23d1 100644
+--- a/kernel/irq_work.c
++++ b/kernel/irq_work.c
+@@ -17,6 +17,7 @@
+ #include <linux/cpu.h>
+ #include <linux/notifier.h>
+ #include <linux/smp.h>
++#include <linux/interrupt.h>
+ #include <asm/processor.h>
+
+
+@@ -65,6 +66,8 @@ void __weak arch_irq_work_raise(void)
+ */
+ bool irq_work_queue_on(struct irq_work *work, int cpu)
+ {
++ struct llist_head *list;
++
+ /* All work should have been flushed before going offline */
+ WARN_ON_ONCE(cpu_is_offline(cpu));
+
+@@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
+ if (!irq_work_claim(work))
+ return false;
+
+- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
++ list = &per_cpu(lazy_list, cpu);
++ else
++ list = &per_cpu(raised_list, cpu);
++
++ if (llist_add(&work->llnode, list))
+ arch_send_call_function_single_ipi(cpu);
+
+ return true;
+@@ -86,6 +94,9 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on);
+ /* Enqueue the irq work @work on the current CPU */
+ bool irq_work_queue(struct irq_work *work)
+ {
++ struct llist_head *list;
++ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
++
+ /* Only queue if not already pending */
+ if (!irq_work_claim(work))
+ return false;
+@@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *work)
+ /* Queue the entry and raise the IPI if needed. */
+ preempt_disable();
+
+- /* If the work is "lazy", handle it from next tick if any */
+- if (work->flags & IRQ_WORK_LAZY) {
+- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+- tick_nohz_tick_stopped())
+- arch_irq_work_raise();
+- } else {
+- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
++ lazy_work = work->flags & IRQ_WORK_LAZY;
++
++ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
++ list = this_cpu_ptr(&lazy_list);
++ else
++ list = this_cpu_ptr(&raised_list);
++
++ if (llist_add(&work->llnode, list)) {
++ if (!lazy_work || tick_nohz_tick_stopped())
+ arch_irq_work_raise();
+ }
+
+@@ -116,9 +129,8 @@ bool irq_work_needs_cpu(void)
+ raised = this_cpu_ptr(&raised_list);
+ lazy = this_cpu_ptr(&lazy_list);
+
+- if (llist_empty(raised) || arch_irq_work_has_interrupt())
+- if (llist_empty(lazy))
+- return false;
++ if (llist_empty(raised) && llist_empty(lazy))
++ return false;
+
+ /* All work should have been flushed before going offline */
+ WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
+@@ -132,7 +144,7 @@ static void irq_work_run_list(struct llist_head *list)
+ struct irq_work *work;
+ struct llist_node *llnode;
+
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+
+ if (llist_empty(list))
+ return;
+@@ -169,7 +181,16 @@ static void irq_work_run_list(struct llist_head *list)
+ void irq_work_run(void)
+ {
+ irq_work_run_list(this_cpu_ptr(&raised_list));
+- irq_work_run_list(this_cpu_ptr(&lazy_list));
++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
++ /*
++ * NOTE: we raise softirq via IPI for safety,
++ * and execute in irq_work_tick() to move the
++ * overhead from hard to soft irq context.
++ */
++ if (!llist_empty(this_cpu_ptr(&lazy_list)))
++ raise_softirq(TIMER_SOFTIRQ);
++ } else
++ irq_work_run_list(this_cpu_ptr(&lazy_list));
+ }
+ EXPORT_SYMBOL_GPL(irq_work_run);
+
+@@ -179,8 +200,17 @@ void irq_work_tick(void)
+
+ if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
+ irq_work_run_list(raised);
++
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
++ irq_work_run_list(this_cpu_ptr(&lazy_list));
++}
++
++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
++void irq_work_tick_soft(void)
++{
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
+ }
++#endif
+
+ /*
+ * Synchronize against the irq_work @entry, ensures the entry is not
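
With this change, irq_work items on RT are funneled through the lazy list
and run from TIMER_SOFTIRQ via irq_work_tick_soft(); only items flagged
IRQ_WORK_HARD_IRQ (a flag this patch set introduces) still execute from
hard interrupt context. Sketch of opting a work item into the hard-irq
path:

static void my_irq_work_fn(struct irq_work *work)
{
	/* still hard-irq context, even with PREEMPT_RT_FULL */
}

static struct irq_work my_irq_work = {
	.flags	= IRQ_WORK_HARD_IRQ,
	.func	= my_irq_work_fn,
};

/* later, from any context: irq_work_queue(&my_irq_work); */
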
+diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
+index ee1bc1bb8feb..ddef07958840 100644
+--- a/kernel/ksysfs.c
++++ b/kernel/ksysfs.c
+@@ -136,6 +136,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
+
+ #endif /* CONFIG_KEXEC_CORE */
+
++#if defined(CONFIG_PREEMPT_RT_FULL)
++static ssize_t realtime_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", 1);
++}
++KERNEL_ATTR_RO(realtime);
++#endif
++
+ /* whether file capabilities are enabled */
+ static ssize_t fscaps_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+@@ -225,6 +234,9 @@ static struct attribute * kernel_attrs[] = {
+ &rcu_expedited_attr.attr,
+ &rcu_normal_attr.attr,
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ &realtime_attr.attr,
++#endif
+ NULL
+ };
+
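
The new attribute gives userspace a stable probe for an RT kernel:
/sys/kernel/realtime exists, and reads "1", only when
CONFIG_PREEMPT_RT_FULL is enabled. A minimal userspace check:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/realtime", "r");

	if (!f) {
		puts("not a PREEMPT_RT_FULL kernel");
		return 1;
	}
	fclose(f);
	puts("PREEMPT_RT_FULL kernel");
	return 0;
}
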
+diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
+index 31322a4275cd..c6bba9299d8b 100644
+--- a/kernel/locking/Makefile
++++ b/kernel/locking/Makefile
+@@ -2,7 +2,7 @@
+ # and is generally not a function of system call inputs.
+ KCOV_INSTRUMENT := n
+
+-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
++obj-y += semaphore.o percpu-rwsem.o
+
+ ifdef CONFIG_FUNCTION_TRACER
+ CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
+@@ -11,7 +11,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
+ endif
+
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
++obj-y += mutex.o
+ obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
++obj-y += rwsem.o
++endif
+ obj-$(CONFIG_LOCKDEP) += lockdep.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+@@ -25,7 +29,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+ obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
++endif
++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
+ obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
+ obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+diff --git a/kernel/locking/lglock.c b/kernel/locking/lglock.c
+index 951cfcd10b4a..57e0ea72c28a 100644
+--- a/kernel/locking/lglock.c
++++ b/kernel/locking/lglock.c
+@@ -4,6 +4,15 @@
+ #include <linux/cpu.h>
+ #include <linux/string.h>
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define lg_lock_ptr arch_spinlock_t
++# define lg_do_lock(l) arch_spin_lock(l)
++# define lg_do_unlock(l) arch_spin_unlock(l)
++#else
++# define lg_lock_ptr struct rt_mutex
++# define lg_do_lock(l) __rt_spin_lock__no_mg(l)
++# define lg_do_unlock(l) __rt_spin_unlock(l)
++#endif
+ /*
+ * Note there is no uninit, so lglocks cannot be defined in
+ * modules (but it's fine to use them from there)
+@@ -12,51 +21,60 @@
+
+ void lg_lock_init(struct lglock *lg, char *name)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int i;
++
++ for_each_possible_cpu(i) {
++ struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
++
++ rt_mutex_init(lock);
++ }
++#endif
+ LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
+ }
+ EXPORT_SYMBOL(lg_lock_init);
+
+ void lg_local_lock(struct lglock *lg)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+- preempt_disable();
++ migrate_disable();
+ lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+ lock = this_cpu_ptr(lg->lock);
+- arch_spin_lock(lock);
++ lg_do_lock(lock);
+ }
+ EXPORT_SYMBOL(lg_local_lock);
+
+ void lg_local_unlock(struct lglock *lg)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock = this_cpu_ptr(lg->lock);
+- arch_spin_unlock(lock);
+- preempt_enable();
++ lg_do_unlock(lock);
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(lg_local_unlock);
+
+ void lg_local_lock_cpu(struct lglock *lg, int cpu)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+- preempt_disable();
++ preempt_disable_nort();
+ lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+ lock = per_cpu_ptr(lg->lock, cpu);
+- arch_spin_lock(lock);
++ lg_do_lock(lock);
+ }
+ EXPORT_SYMBOL(lg_local_lock_cpu);
+
+ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock = per_cpu_ptr(lg->lock, cpu);
+- arch_spin_unlock(lock);
+- preempt_enable();
++ lg_do_unlock(lock);
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(lg_local_unlock_cpu);
+
+@@ -68,30 +86,30 @@ void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
+ if (cpu2 < cpu1)
+ swap(cpu1, cpu2);
+
+- preempt_disable();
++ preempt_disable_nort();
+ lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+- arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
+- arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
++ lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
++ lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
+ }
+
+ void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
+ {
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
+- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
+- preempt_enable();
++ lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
++ lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
++ preempt_enable_nort();
+ }
+
+ void lg_global_lock(struct lglock *lg)
+ {
+ int i;
+
+- preempt_disable();
++ preempt_disable_nort();
+ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+ for_each_possible_cpu(i) {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+ lock = per_cpu_ptr(lg->lock, i);
+- arch_spin_lock(lock);
++ lg_do_lock(lock);
+ }
+ }
+ EXPORT_SYMBOL(lg_global_lock);
+@@ -102,10 +120,35 @@ void lg_global_unlock(struct lglock *lg)
+
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ for_each_possible_cpu(i) {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+ lock = per_cpu_ptr(lg->lock, i);
+- arch_spin_unlock(lock);
++ lg_do_unlock(lock);
+ }
+- preempt_enable();
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(lg_global_unlock);
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * HACK: If you use this, you get to keep the pieces.
++ * Used in queue_stop_cpus_work() when stop machinery
++ * is called from inactive CPU, so we can't schedule.
++ */
++# define lg_do_trylock_relax(l) \
++ do { \
++ while (!__rt_spin_trylock(l)) \
++ cpu_relax(); \
++ } while (0)
++
++void lg_global_trylock_relax(struct lglock *lg)
++{
++ int i;
++
++ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
++ for_each_possible_cpu(i) {
++ lg_lock_ptr *lock;
++ lock = per_cpu_ptr(lg->lock, i);
++ lg_do_trylock_relax(lock);
++ }
++}
++#endif
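
The conversion is invisible to lglock users: the API is unchanged, only
the per-CPU lock type switches from arch_spinlock_t (preemption disabled)
to rt_mutex (migration disabled, sleepable) on RT. For reference, usage
per the 4.8 lglock API:

DEFINE_STATIC_LGLOCK(my_lglock);	/* lg_lock_init() once at boot */

static void my_local_update(void)
{
	lg_local_lock(&my_lglock);	/* just this CPU's lock */
	/* ... update this CPU's share of the data ... */
	lg_local_unlock(&my_lglock);
}

static void my_global_sweep(void)
{
	lg_global_lock(&my_lglock);	/* all CPUs' locks */
	/* ... walk every CPU's data ... */
	lg_global_unlock(&my_lglock);
}
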
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 589d763a49b3..4b48c4bfb60c 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3686,6 +3686,7 @@ static void check_flags(unsigned long flags)
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * We dont accurately track softirq state in e.g.
+ * hardirq contexts (such as on 4KSTACKS), so only
+@@ -3700,6 +3701,7 @@ static void check_flags(unsigned long flags)
+ DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+ }
+ }
++#endif
+
+ if (!debug_locks)
+ print_irqtrace_events(current);
+diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
+index f8c5af52a131..788068773e61 100644
+--- a/kernel/locking/locktorture.c
++++ b/kernel/locking/locktorture.c
+@@ -26,7 +26,6 @@
+ #include <linux/kthread.h>
+ #include <linux/sched/rt.h>
+ #include <linux/spinlock.h>
+-#include <linux/rwlock.h>
+ #include <linux/mutex.h>
+ #include <linux/rwsem.h>
+ #include <linux/smp.h>
+diff --git a/kernel/locking/rt.c b/kernel/locking/rt.c
+new file mode 100644
+index 000000000000..665754c00e1e
+--- /dev/null
++++ b/kernel/locking/rt.c
+@@ -0,0 +1,498 @@
++/*
++ * kernel/rt.c
++ *
++ * Real-Time Preemption Support
++ *
++ * started by Ingo Molnar:
++ *
++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
++ *
++ * historic credit for proving that Linux spinlocks can be implemented via
++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
++ * and others) who prototyped it on 2.4 and did lots of comparative
++ * research and analysis; TimeSys, for proving that you can implement a
++ * fully preemptible kernel via the use of IRQ threading and mutexes;
++ * Bill Huey for persuasively arguing on lkml that the mutex model is the
++ * right one; and to MontaVista, who ported pmutexes to 2.6.
++ *
++ * This code is a from-scratch implementation and is not based on pmutexes,
++ * but the idea of converting spinlocks to mutexes is used here too.
++ *
++ * lock debugging, locking tree, deadlock detection:
++ *
++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
++ * Released under the General Public License (GPL).
++ *
++ * Includes portions of the generic R/W semaphore implementation from:
++ *
++ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
++ * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
++ * - Derived also from comments by Linus
++ *
++ * Pending ownership of locks and ownership stealing:
++ *
++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
++ *
++ * (also by Steven Rostedt)
++ * - Converted single pi_lock to individual task locks.
++ *
++ * By Esben Nielsen:
++ * Doing priority inheritance with help of the scheduler.
++ *
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
++ * - major rework based on Esben Nielsens initial patch
++ * - replaced thread_info references by task_struct refs
++ * - removed task->pending_owner dependency
++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
++ * in the scheduler return path as discussed with Steven Rostedt
++ *
++ * Copyright (C) 2006, Kihon Technologies Inc.
++ * Steven Rostedt <rostedt@goodmis.org>
++ * - debugged and patched Thomas Gleixner's rework.
++ * - added back the cmpxchg to the rework.
++ * - turned atomic require back on for SMP.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/rtmutex.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/syscalls.h>
++#include <linux/interrupt.h>
++#include <linux/plist.h>
++#include <linux/fs.h>
++#include <linux/futex.h>
++#include <linux/hrtimer.h>
++
++#include "rtmutex_common.h"
++
++/*
++ * struct mutex functions
++ */
++void __mutex_do_init(struct mutex *mutex, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
++ lockdep_init_map(&mutex->dep_map, name, key, 0);
++#endif
++ mutex->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__mutex_do_init);
++
++void __lockfunc _mutex_lock(struct mutex *lock)
++{
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock);
++
++int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ ret = rt_mutex_lock_interruptible(&lock->lock);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible);
++
++int __lockfunc _mutex_lock_killable(struct mutex *lock)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&lock->lock);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
++{
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock_nested);
++
++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
++{
++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock_nest_lock);
++
++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
++{
++ int ret;
++
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ ret = rt_mutex_lock_interruptible(&lock->lock);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
++
++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&lock->lock);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable_nested);
++#endif
++
++int __lockfunc _mutex_trylock(struct mutex *lock)
++{
++ int ret = rt_mutex_trylock(&lock->lock);
++
++ if (ret)
++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_trylock);
++
++void __lockfunc _mutex_unlock(struct mutex *lock)
++{
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ rt_mutex_unlock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_unlock);
++
++/*
++ * rwlock_t functions
++ */
++int __lockfunc rt_write_trylock(rwlock_t *rwlock)
++{
++ int ret;
++
++ migrate_disable();
++ ret = rt_mutex_trylock(&rwlock->lock);
++ if (ret)
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock);
++
++int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
++{
++ int ret;
++
++ *flags = 0;
++ ret = rt_write_trylock(rwlock);
++ return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock_irqsave);
++
++int __lockfunc rt_read_trylock(rwlock_t *rwlock)
++{
++ struct rt_mutex *lock = &rwlock->lock;
++ int ret = 1;
++
++ /*
++ * recursive read locks succeed when current owns the lock,
++ * but not when read_depth == 0 which means that the lock is
++ * write locked.
++ */
++ if (rt_mutex_owner(lock) != current) {
++ migrate_disable();
++ ret = rt_mutex_trylock(lock);
++ if (ret)
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++
++ } else if (!rwlock->read_depth) {
++ ret = 0;
++ }
++
++ if (ret)
++ rwlock->read_depth++;
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_read_trylock);
++
++void __lockfunc rt_write_lock(rwlock_t *rwlock)
++{
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++ __rt_spin_lock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_write_lock);
++
++void __lockfunc rt_read_lock(rwlock_t *rwlock)
++{
++ struct rt_mutex *lock = &rwlock->lock;
++
++
++ /*
++ * recursive read locks succeed when current owns the lock
++ */
++ if (rt_mutex_owner(lock) != current) {
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++ __rt_spin_lock(lock);
++ }
++ rwlock->read_depth++;
++}
++
++EXPORT_SYMBOL(rt_read_lock);
++
++void __lockfunc rt_write_unlock(rwlock_t *rwlock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++ __rt_spin_unlock(&rwlock->lock);
++ migrate_enable();
++}
++EXPORT_SYMBOL(rt_write_unlock);
++
++void __lockfunc rt_read_unlock(rwlock_t *rwlock)
++{
++ /* Release the lock only when read_depth is down to 0 */
++ if (--rwlock->read_depth == 0) {
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++ __rt_spin_unlock(&rwlock->lock);
++ migrate_enable();
++ }
++}
++EXPORT_SYMBOL(rt_read_unlock);
++
++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
++{
++ rt_write_lock(rwlock);
++
++ return 0;
++}
++EXPORT_SYMBOL(rt_write_lock_irqsave);
++
++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
++{
++ rt_read_lock(rwlock);
++
++ return 0;
++}
++EXPORT_SYMBOL(rt_read_lock_irqsave);
++
++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
++ lockdep_init_map(&rwlock->dep_map, name, key, 0);
++#endif
++ rwlock->lock.save_state = 1;
++ rwlock->read_depth = 0;
++}
++EXPORT_SYMBOL(__rt_rwlock_init);
++
++/*
++ * rw_semaphores
++ */
++
++void rt_up_write(struct rw_semaphore *rwsem)
++{
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ rt_mutex_unlock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_up_write);
++
++void __rt_up_read(struct rw_semaphore *rwsem)
++{
++ if (--rwsem->read_depth == 0)
++ rt_mutex_unlock(&rwsem->lock);
++}
++
++void rt_up_read(struct rw_semaphore *rwsem)
++{
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ __rt_up_read(rwsem);
++}
++EXPORT_SYMBOL(rt_up_read);
++
++/*
++ * downgrade a write lock into a read lock
++ * - just wake up any readers at the front of the queue
++ */
++void rt_downgrade_write(struct rw_semaphore *rwsem)
++{
++ BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
++ rwsem->read_depth = 1;
++}
++EXPORT_SYMBOL(rt_downgrade_write);
++
++int rt_down_write_trylock(struct rw_semaphore *rwsem)
++{
++ int ret = rt_mutex_trylock(&rwsem->lock);
++
++ if (ret)
++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_write_trylock);
++
++void rt_down_write(struct rw_semaphore *rwsem)
++{
++ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write);
++
++int rt_down_write_killable(struct rw_semaphore *rwsem)
++{
++ int ret;
++
++ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&rwsem->lock);
++ if (ret)
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_write_killable);
++
++int rt_down_write_killable_nested(struct rw_semaphore *rwsem, int subclass)
++{
++ int ret;
++
++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&rwsem->lock);
++ if (ret)
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_write_killable_nested);
++
++void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
++{
++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write_nested);
++
++void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
++ struct lockdep_map *nest)
++{
++ rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write_nested_lock);
++
++int rt__down_read_trylock(struct rw_semaphore *rwsem)
++{
++ struct rt_mutex *lock = &rwsem->lock;
++ int ret = 1;
++
++ /*
++ * recursive read locks succeed when current owns the rwsem,
++ * but not when read_depth == 0 which means that the rwsem is
++ * write locked.
++ */
++ if (rt_mutex_owner(lock) != current)
++ ret = rt_mutex_trylock(&rwsem->lock);
++ else if (!rwsem->read_depth)
++ ret = 0;
++
++ if (ret)
++ rwsem->read_depth++;
++ return ret;
++
++}
++
++int rt_down_read_trylock(struct rw_semaphore *rwsem)
++{
++ int ret;
++
++ ret = rt__down_read_trylock(rwsem);
++ if (ret)
++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_read_trylock);
++
++void rt__down_read(struct rw_semaphore *rwsem)
++{
++ struct rt_mutex *lock = &rwsem->lock;
++
++ if (rt_mutex_owner(lock) != current)
++ rt_mutex_lock(&rwsem->lock);
++ rwsem->read_depth++;
++}
++EXPORT_SYMBOL(rt__down_read);
++
++static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
++{
++ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
++ rt__down_read(rwsem);
++}
++
++void rt_down_read(struct rw_semaphore *rwsem)
++{
++ __rt_down_read(rwsem, 0);
++}
++EXPORT_SYMBOL(rt_down_read);
++
++void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
++{
++ __rt_down_read(rwsem, subclass);
++}
++EXPORT_SYMBOL(rt_down_read_nested);
++
++void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
++ lockdep_init_map(&rwsem->dep_map, name, key, 0);
++#endif
++ rwsem->read_depth = 0;
++ rwsem->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__rt_rwsem_init);
++
++/**
++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
++ * @cnt: the atomic which we are to dec
++ * @lock: the mutex to return holding if we dec to 0
++ *
++ * return true and hold lock if we dec to 0, return false otherwise
++ */
++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
++{
++ /* dec if we can't possibly hit 0 */
++ if (atomic_add_unless(cnt, -1, 1))
++ return 0;
++ /* we might hit 0, so take the lock */
++ mutex_lock(lock);
++ if (!atomic_dec_and_test(cnt)) {
++ /* when we actually did the dec, we didn't hit 0 */
++ mutex_unlock(lock);
++ return 0;
++ }
++ /* we hit 0, and we hold the lock */
++ return 1;
++}
++EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
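++
++/*
++ * Editor's sketch (not part of the original patch): the same
++ * "take the lock only for the final reference" pattern in user-space
++ * C11, assuming nothing beyond <stdatomic.h> and pthreads.
++ * dec_and_lock_sketch() is an illustrative name, not kernel API.
++ * Returns 1 with the mutex held iff the count hit 0.
++ */
++#include <stdatomic.h>
++#include <pthread.h>
++
++static int dec_and_lock_sketch(atomic_int *cnt, pthread_mutex_t *m)
++{
++	int old = atomic_load(cnt);
++
++	/* Fast path: decrement lock-free while we cannot reach 0. */
++	while (old > 1)
++		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
++			return 0;
++
++	/* Slow path: the final decrement must happen under the lock. */
++	pthread_mutex_lock(m);
++	if (atomic_fetch_sub(cnt, 1) == 1)
++		return 1;	/* count is 0; caller still holds *m */
++	pthread_mutex_unlock(m);
++	return 0;
++}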
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 1ec0f48962b3..2576f7ccf8e2 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -7,6 +7,11 @@
+ * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
+ * Copyright (C) 2006 Esben Nielsen
++ * Adaptive Spinlocks:
++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
++ * and Peter Morreale,
++ * Adaptive Spinlocks simplification:
++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
+ *
+ * See Documentation/locking/rt-mutex-design.txt for details.
+ */
+@@ -16,6 +21,7 @@
+ #include <linux/sched/rt.h>
+ #include <linux/sched/deadline.h>
+ #include <linux/timer.h>
++#include <linux/ww_mutex.h>
+
+ #include "rtmutex_common.h"
+
+@@ -69,6 +75,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+ clear_rt_mutex_waiters(lock);
+ }
+
++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
++{
++ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
++ waiter != PI_REQUEUE_INPROGRESS;
++}
++
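++/*
++ * Editor's sketch (not part of the original patch): the sentinel
++ * trick above in isolation. PI_WAKEUP_INPROGRESS and
++ * PI_REQUEUE_INPROGRESS are small-integer pointer values that are
++ * never dereferenced, letting pi_blocked_on encode transitional
++ * states besides NULL and a real waiter. The *_sk names below are
++ * illustrative, not kernel API.
++ */
++#include <stdbool.h>
++
++struct waiter_sk;			/* opaque on purpose */
++
++#define WAKEUP_INPROGRESS_SK	((struct waiter_sk *)1)
++#define REQUEUE_INPROGRESS_SK	((struct waiter_sk *)2)
++
++static bool real_waiter_sk(const struct waiter_sk *w)
++{
++	return w && w != WAKEUP_INPROGRESS_SK && w != REQUEUE_INPROGRESS_SK;
++}
++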
+ /*
+ * We can speed up the acquire/release, if there's no debugging state to be
+ * set up.
+@@ -350,6 +362,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
+ return debug_rt_mutex_detect_deadlock(waiter, chwalk);
+ }
+
++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
++{
++ if (waiter->savestate)
++ wake_up_lock_sleeper(waiter->task);
++ else
++ wake_up_process(waiter->task);
++}
++
+ /*
+ * Max number of times we'll walk the boosting chain:
+ */
+@@ -357,7 +377,8 @@ int max_lock_depth = 1024;
+
+ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+ {
+- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
++ return rt_mutex_real_waiter(p->pi_blocked_on) ?
++ p->pi_blocked_on->lock : NULL;
+ }
+
+ /*
+@@ -493,7 +514,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * reached or the state of the chain has changed while we
+ * dropped the locks.
+ */
+- if (!waiter)
++ if (!rt_mutex_real_waiter(waiter))
+ goto out_unlock_pi;
+
+ /*
+@@ -655,13 +676,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * follow here. This is the end of the chain we are walking.
+ */
+ if (!rt_mutex_owner(lock)) {
++ struct rt_mutex_waiter *lock_top_waiter;
++
+ /*
+ * If the requeue [7] above changed the top waiter,
+ * then we need to wake the new top waiter up to try
+ * to get the lock.
+ */
+- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
+- wake_up_process(rt_mutex_top_waiter(lock)->task);
++ lock_top_waiter = rt_mutex_top_waiter(lock);
++ if (prerequeue_top_waiter != lock_top_waiter)
++ rt_mutex_wake_waiter(lock_top_waiter);
+ raw_spin_unlock_irq(&lock->wait_lock);
+ return 0;
+ }
+@@ -754,6 +778,25 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ return ret;
+ }
+
++
++#define STEAL_NORMAL 0
++#define STEAL_LATERAL 1
++
++/*
++ * Note that RT tasks are excluded from lateral-steals to prevent the
++ * introduction of an unbounded latency
++ */
++static inline int lock_is_stealable(struct task_struct *task,
++ struct task_struct *pendowner, int mode)
++{
++ if (mode == STEAL_NORMAL || rt_task(task)) {
++ if (task->prio >= pendowner->prio)
++ return 0;
++ } else if (task->prio > pendowner->prio)
++ return 0;
++ return 1;
++}
++
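++/*
++ * Editor's sketch (not part of the original patch): the two steal
++ * modes above as a pure predicate. Lower numeric prio is more
++ * important. STEAL_NORMAL (and any RT task) requires strictly higher
++ * priority than the pending owner; STEAL_LATERAL also admits an
++ * equal-priority non-RT task. Names are illustrative, not kernel API.
++ */
++#include <stdbool.h>
++
++enum steal_mode_sk { STEAL_NORMAL_SK, STEAL_LATERAL_SK };
++
++static bool can_steal_sk(int task_prio, int pend_prio, bool task_is_rt,
++			 enum steal_mode_sk mode)
++{
++	if (mode == STEAL_NORMAL_SK || task_is_rt)
++		return task_prio < pend_prio;	/* strictly better only */
++	return task_prio <= pend_prio;		/* lateral steal allowed */
++}
++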
+ /*
+ * Try to take an rt-mutex
+ *
+@@ -764,8 +807,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * @waiter: The waiter that is queued to the lock's wait tree if the
+ * callsite called task_blocked_on_lock(), otherwise NULL
+ */
+-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+- struct rt_mutex_waiter *waiter)
++static int __try_to_take_rt_mutex(struct rt_mutex *lock,
++ struct task_struct *task,
++ struct rt_mutex_waiter *waiter, int mode)
+ {
+ /*
+ * Before testing whether we can acquire @lock, we set the
+@@ -802,8 +846,10 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ * If waiter is not the highest priority waiter of
+ * @lock, give up.
+ */
+- if (waiter != rt_mutex_top_waiter(lock))
++ if (waiter != rt_mutex_top_waiter(lock)) {
++ /* XXX lock_is_stealable() ? */
+ return 0;
++ }
+
+ /*
+ * We can acquire the lock. Remove the waiter from the
+@@ -821,14 +867,10 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ * not need to be dequeued.
+ */
+ if (rt_mutex_has_waiters(lock)) {
+- /*
+- * If @task->prio is greater than or equal to
+- * the top waiter priority (kernel view),
+- * @task lost.
+- */
+- if (task->prio >= rt_mutex_top_waiter(lock)->prio)
+- return 0;
++ struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
+
++ if (task != pown && !lock_is_stealable(task, pown, mode))
++ return 0;
+ /*
+ * The current top waiter stays enqueued. We
+ * don't have to change anything in the lock
+@@ -877,6 +919,438 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ return 1;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * preemptible spin_lock functions:
++ */
++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
++ void (*slowfn)(struct rt_mutex *lock,
++ bool mg_off),
++ bool do_mig_dis)
++{
++ might_sleep_no_state_check();
++
++ if (do_mig_dis)
++ migrate_disable();
++
++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
++ rt_mutex_deadlock_account_lock(lock, current);
++ else
++ slowfn(lock, do_mig_dis);
++}
++
++static inline int rt_spin_lock_fastunlock(struct rt_mutex *lock,
++ int (*slowfn)(struct rt_mutex *lock))
++{
++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
++ rt_mutex_deadlock_account_unlock(current);
++ return 0;
++ }
++ return slowfn(lock);
++}
++#ifdef CONFIG_SMP
++/*
++ * Note that owner is a speculative pointer and dereferencing relies
++ * on rcu_read_lock() and the check against the lock owner.
++ */
++static int adaptive_wait(struct rt_mutex *lock,
++ struct task_struct *owner)
++{
++ int res = 0;
++
++ rcu_read_lock();
++ for (;;) {
++ if (owner != rt_mutex_owner(lock))
++ break;
++ /*
++ * Ensure that owner->on_cpu is dereferenced _after_
++ * checking the above to be valid.
++ */
++ barrier();
++ if (!owner->on_cpu) {
++ res = 1;
++ break;
++ }
++ cpu_relax();
++ }
++ rcu_read_unlock();
++ return res;
++}
++#else
++static int adaptive_wait(struct rt_mutex *lock,
++ struct task_struct *orig_owner)
++{
++ return 1;
++}
++#endif
++
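++/*
++ * Editor's sketch (not part of the original patch): the adaptive-spin
++ * policy above in portable C11. Spin only while the owner is running
++ * on a CPU and thus likely to release soon; once it blocks, report
++ * that the waiter should sleep too. The kernel version additionally
++ * protects the owner dereference with rcu_read_lock(); this sketch
++ * and all *_sk names are illustrative, not kernel API.
++ */
++#include <stdatomic.h>
++#include <stdbool.h>
++#include <sched.h>
++
++struct task_sk { _Atomic bool on_cpu; };
++struct lock_sk { struct task_sk *_Atomic owner; };
++
++static bool adaptive_wait_sk(struct lock_sk *l, struct task_sk *owner)
++{
++	while (atomic_load(&l->owner) == owner) {
++		if (!atomic_load(&owner->on_cpu))
++			return true;	/* owner sleeps: block, don't spin */
++		sched_yield();		/* stand-in for cpu_relax() */
++	}
++	return false;			/* ownership changed: retry trylock */
++}
++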
++static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ struct task_struct *task,
++ enum rtmutex_chainwalk chwalk);
++/*
++ * Slow path lock function spin_lock style: this variant is very
++ * careful not to miss any non-lock wakeups.
++ *
++ * We store the current state under p->pi_lock in p->saved_state and
++ * the try_to_wake_up() code handles this accordingly.
++ */
++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock,
++ bool mg_off)
++{
++ struct task_struct *lock_owner, *self = current;
++ struct rt_mutex_waiter waiter, *top_waiter;
++ unsigned long flags;
++ int ret;
++
++ rt_mutex_init_waiter(&waiter, true);
++
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++
++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++ return;
++ }
++
++ BUG_ON(rt_mutex_owner(lock) == self);
++
++ /*
++ * We save whatever state the task is in and we'll restore it
++ * after acquiring the lock taking real wakeups into account
++ * as well. We are serialized via pi_lock against wakeups. See
++ * try_to_wake_up().
++ */
++ raw_spin_lock(&self->pi_lock);
++ self->saved_state = self->state;
++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
++ raw_spin_unlock(&self->pi_lock);
++
++ ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
++ BUG_ON(ret);
++
++ for (;;) {
++ /* Try to acquire the lock again. */
++ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
++ break;
++
++ top_waiter = rt_mutex_top_waiter(lock);
++ lock_owner = rt_mutex_owner(lock);
++
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++
++ debug_rt_mutex_print_deadlock(&waiter);
++
++ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) {
++ if (mg_off)
++ migrate_enable();
++ schedule();
++ if (mg_off)
++ migrate_disable();
++ }
++
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++
++ raw_spin_lock(&self->pi_lock);
++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
++ raw_spin_unlock(&self->pi_lock);
++ }
++
++ /*
++ * Restore the task state to current->saved_state. We set it
++ * to the original state above and the try_to_wake_up() code
++ * has possibly updated it when a real (non-rtmutex) wakeup
++ * happened while we were blocked. Clear saved_state so
++	 * try_to_wake_up() does not get confused.
++ */
++ raw_spin_lock(&self->pi_lock);
++ __set_current_state_no_track(self->saved_state);
++ self->saved_state = TASK_RUNNING;
++ raw_spin_unlock(&self->pi_lock);
++
++ /*
++ * try_to_take_rt_mutex() sets the waiter bit
++ * unconditionally. We might have to fix that up:
++ */
++ fixup_rt_mutex_waiters(lock);
++
++ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
++ BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
++
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++
++ debug_rt_mutex_free_waiter(&waiter);
++}
++
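++/*
++ * Editor's sketch (not part of the original patch): the saved_state
++ * hand-off used by the slowlock above, as a toy model. A task sleeping
++ * on a spinlock-turned-rtmutex must appear UNINTERRUPTIBLE, yet a real
++ * wakeup racing in must not be lost: the original state is parked in
++ * saved_state, try_to_wake_up() updates it there, and it is restored
++ * once the lock is held. No real scheduling here; the pi_lock
++ * serialization is elided and all *_sk names are illustrative.
++ */
++enum tstate_sk { SK_RUNNING, SK_INTERRUPTIBLE, SK_UNINTERRUPTIBLE };
++
++struct self_sk { enum tstate_sk state, saved_state; };
++
++static void begin_lock_sleep_sk(struct self_sk *t)
++{
++	t->saved_state = t->state;	/* remember e.g. INTERRUPTIBLE */
++	t->state = SK_UNINTERRUPTIBLE;	/* sleep for the lock only */
++}
++
++/* A real (non-rtmutex) wakeup that races in lands in saved_state. */
++static void real_wakeup_sk(struct self_sk *t)
++{
++	t->saved_state = SK_RUNNING;
++}
++
++static void end_lock_sleep_sk(struct self_sk *t)
++{
++	t->state = t->saved_state;	/* replay any real wakeup */
++	t->saved_state = SK_RUNNING;
++}
++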
++static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
++ struct wake_q_head *wake_sleeper_q,
++ struct rt_mutex *lock);
++/*
++ * Slow path to release a rt_mutex spin_lock style
++ */
++static int noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
++{
++ unsigned long flags;
++ WAKE_Q(wake_q);
++ WAKE_Q(wake_sleeper_q);
++
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++
++ debug_rt_mutex_unlock(lock);
++
++ rt_mutex_deadlock_account_unlock(current);
++
++ if (!rt_mutex_has_waiters(lock)) {
++ lock->owner = NULL;
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++ return 0;
++ }
++
++ mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
++
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++ wake_up_q(&wake_q);
++ wake_up_q_sleeper(&wake_sleeper_q);
++
++	/* Undo pi boosting when necessary */
++ rt_mutex_adjust_prio(current);
++ return 0;
++}
++
++static int noinline __sched rt_spin_lock_slowunlock_no_deboost(struct rt_mutex *lock)
++{
++ unsigned long flags;
++ WAKE_Q(wake_q);
++ WAKE_Q(wake_sleeper_q);
++
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++
++ debug_rt_mutex_unlock(lock);
++
++ rt_mutex_deadlock_account_unlock(current);
++
++ if (!rt_mutex_has_waiters(lock)) {
++ lock->owner = NULL;
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++ return 0;
++ }
++
++ mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
++
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++ wake_up_q(&wake_q);
++ wake_up_q_sleeper(&wake_sleeper_q);
++ return 1;
++}
++
++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
++{
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false);
++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock__no_mg);
++
++void __lockfunc rt_spin_lock(spinlock_t *lock)
++{
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock);
++
++void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true);
++}
++EXPORT_SYMBOL(__rt_spin_lock);
++
++void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false);
++}
++EXPORT_SYMBOL(__rt_spin_lock__no_mg);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
++{
++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
++}
++EXPORT_SYMBOL(rt_spin_lock_nested);
++#endif
++
++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ spin_release(&lock->dep_map, 1, _RET_IP_);
++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(rt_spin_unlock__no_mg);
++
++void __lockfunc rt_spin_unlock(spinlock_t *lock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ spin_release(&lock->dep_map, 1, _RET_IP_);
++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++ migrate_enable();
++}
++EXPORT_SYMBOL(rt_spin_unlock);
++
++int __lockfunc rt_spin_unlock_no_deboost(spinlock_t *lock)
++{
++ int ret;
++
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ spin_release(&lock->dep_map, 1, _RET_IP_);
++ ret = rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_no_deboost);
++ migrate_enable();
++ return ret;
++}
++
++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(__rt_spin_unlock);
++
++/*
++ * Wait for the lock to get unlocked: instead of polling for an unlock
++ * (like raw spinlocks do), we lock and unlock, to force the kernel to
++ * schedule if there's contention:
++ */
++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
++{
++ spin_lock(lock);
++ spin_unlock(lock);
++}
++EXPORT_SYMBOL(rt_spin_unlock_wait);
++
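++/*
++ * Editor's sketch (not part of the original patch): the lock/unlock
++ * form of "wait until unlocked" above, in pthreads. Acquiring and
++ * immediately releasing forces the waiter to sleep through contention
++ * rather than poll. Illustrative only.
++ */
++#include <pthread.h>
++
++static void unlock_wait_sk(pthread_mutex_t *m)
++{
++	pthread_mutex_lock(m);	/* sleeps until the holder is done */
++	pthread_mutex_unlock(m);
++}
++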
++int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
++{
++ return rt_mutex_trylock(lock);
++}
++
++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
++{
++ int ret;
++
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret)
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock__no_mg);
++
++int __lockfunc rt_spin_trylock(spinlock_t *lock)
++{
++ int ret;
++
++ migrate_disable();
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret)
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock);
++
++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
++{
++ int ret;
++
++ local_bh_disable();
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret) {
++ migrate_disable();
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ } else
++ local_bh_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_bh);
++
++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
++{
++ int ret;
++
++ *flags = 0;
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret) {
++ migrate_disable();
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_irqsave);
++
++int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
++{
++	/* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
++ if (atomic_add_unless(atomic, -1, 1))
++ return 0;
++ rt_spin_lock(lock);
++ if (atomic_dec_and_test(atomic))
++ return 1;
++ rt_spin_unlock(lock);
++ return 0;
++}
++EXPORT_SYMBOL(atomic_dec_and_spin_lock);
++
++ void
++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
++ lockdep_init_map(&lock->dep_map, name, key, 0);
++#endif
++}
++EXPORT_SYMBOL(__rt_spin_lock_init);
++
++#endif /* PREEMPT_RT_FULL */
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++ static inline int __sched
++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
++ struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
++
++ if (!hold_ctx)
++ return 0;
++
++ if (unlikely(ctx == hold_ctx))
++ return -EALREADY;
++
++ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
++ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
++#ifdef CONFIG_DEBUG_MUTEXES
++ DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
++ ctx->contending_lock = ww;
++#endif
++ return -EDEADLK;
++ }
++
++ return 0;
++}
++#else
++ static inline int __sched
++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++ BUG();
++ return 0;
++}
++
++#endif
++
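++/*
++ * Editor's sketch (not part of the original patch): the wound/wait
++ * stamp test above as a pure function. Acquire contexts carry
++ * monotonically assigned stamps; in unsigned wrap-around arithmetic a
++ * difference <= LONG_MAX means "mine is younger", and the younger
++ * context backs off with -EDEADLK (the kernel additionally tie-breaks
++ * equal stamps by context address). Names are illustrative, not
++ * kernel API.
++ */
++#include <errno.h>
++#include <limits.h>
++
++struct ww_ctx_sk { unsigned long stamp; };
++
++static int check_stamp_sk(const struct ww_ctx_sk *mine,
++			  const struct ww_ctx_sk *holder)
++{
++	if (!holder)
++		return 0;		/* lock is not ww-held: just wait */
++	if (mine == holder)
++		return -EALREADY;	/* relocking a lock we hold */
++	if (mine->stamp - holder->stamp <= LONG_MAX)
++		return -EDEADLK;	/* younger context must back off */
++	return 0;			/* older context keeps waiting */
++}
++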
++static inline int
++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
++ struct rt_mutex_waiter *waiter)
++{
++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
++}
++
+ /*
+ * Task blocks on lock.
+ *
+@@ -907,6 +1381,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ return -EDEADLK;
+
+ raw_spin_lock(&task->pi_lock);
++
++ /*
++ * In the case of futex requeue PI, this will be a proxy
++	 * lock. The task will wake unaware that it is enqueued on
++ * this lock. Avoid blocking on two locks and corrupting
++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS
++ * flag. futex_wait_requeue_pi() sets this when it wakes up
++ * before requeue (due to a signal or timeout). Do not enqueue
++ * the task if PI_WAKEUP_INPROGRESS is set.
++ */
++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
++ raw_spin_unlock(&task->pi_lock);
++ return -EAGAIN;
++ }
++
++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
++
+ __rt_mutex_adjust_prio(task);
+ waiter->task = task;
+ waiter->lock = lock;
+@@ -930,7 +1421,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ rt_mutex_enqueue_pi(owner, waiter);
+
+ __rt_mutex_adjust_prio(owner);
+- if (owner->pi_blocked_on)
++ if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ chain_walk = 1;
+ } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
+ chain_walk = 1;
+@@ -972,6 +1463,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ * Called with lock->wait_lock held and interrupts disabled.
+ */
+ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
++ struct wake_q_head *wake_sleeper_q,
+ struct rt_mutex *lock)
+ {
+ struct rt_mutex_waiter *waiter;
+@@ -1000,7 +1492,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+
+ raw_spin_unlock(&current->pi_lock);
+
+- wake_q_add(wake_q, waiter->task);
++ if (waiter->savestate)
++ wake_q_add(wake_sleeper_q, waiter->task);
++ else
++ wake_q_add(wake_q, waiter->task);
+ }
+
+ /*
+@@ -1014,7 +1509,7 @@ static void remove_waiter(struct rt_mutex *lock,
+ {
+ bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
+ struct task_struct *owner = rt_mutex_owner(lock);
+- struct rt_mutex *next_lock;
++ struct rt_mutex *next_lock = NULL;
+
+ raw_spin_lock(&current->pi_lock);
+ rt_mutex_dequeue(lock, waiter);
+@@ -1038,7 +1533,8 @@ static void remove_waiter(struct rt_mutex *lock,
+ __rt_mutex_adjust_prio(owner);
+
+ /* Store the lock on which owner is blocked or NULL */
+- next_lock = task_blocked_on_lock(owner);
++ if (rt_mutex_real_waiter(owner->pi_blocked_on))
++ next_lock = task_blocked_on_lock(owner);
+
+ raw_spin_unlock(&owner->pi_lock);
+
+@@ -1074,17 +1570,17 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+- if (!waiter || (waiter->prio == task->prio &&
++ if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
+ !dl_prio(task->prio))) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
+ next_lock = waiter->lock;
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(task);
+
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+ next_lock, NULL, task);
+ }
+@@ -1102,7 +1598,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ static int __sched
+ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- struct rt_mutex_waiter *waiter)
++ struct rt_mutex_waiter *waiter,
++ struct ww_acquire_ctx *ww_ctx)
+ {
+ int ret = 0;
+
+@@ -1125,6 +1622,12 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ break;
+ }
+
++ if (ww_ctx && ww_ctx->acquired > 0) {
++ ret = __mutex_lock_check_stamp(lock, ww_ctx);
++ if (ret)
++ break;
++ }
++
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ debug_rt_mutex_print_deadlock(waiter);
+@@ -1159,21 +1662,96 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ }
+ }
+
++static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
++ struct ww_acquire_ctx *ww_ctx)
++{
++#ifdef CONFIG_DEBUG_MUTEXES
++ /*
++ * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
++ * but released with a normal mutex_unlock in this call.
++ *
++ * This should never happen, always use ww_mutex_unlock.
++ */
++ DEBUG_LOCKS_WARN_ON(ww->ctx);
++
++ /*
++	 * Not quite done after calling ww_acquire_done()?
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
++
++ if (ww_ctx->contending_lock) {
++ /*
++ * After -EDEADLK you tried to
++ * acquire a different ww_mutex? Bad!
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
++
++ /*
++ * You called ww_mutex_lock after receiving -EDEADLK,
++ * but 'forgot' to unlock everything else first?
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
++ ww_ctx->contending_lock = NULL;
++ }
++
++ /*
++ * Naughty, using a different class will lead to undefined behavior!
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
++#endif
++ ww_ctx->acquired++;
++}
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void ww_mutex_account_lock(struct rt_mutex *lock,
++ struct ww_acquire_ctx *ww_ctx)
++{
++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
++ struct rt_mutex_waiter *waiter, *n;
++
++ /*
++ * This branch gets optimized out for the common case,
++ * and is only important for ww_mutex_lock.
++ */
++ ww_mutex_lock_acquired(ww, ww_ctx);
++ ww->ctx = ww_ctx;
++
++ /*
++ * Give any possible sleeping processes the chance to wake up,
++ * so they can recheck if they have to back off.
++ */
++ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
++ tree_entry) {
++ /* XXX debug rt mutex waiter wakeup */
++
++ BUG_ON(waiter->lock != lock);
++ rt_mutex_wake_waiter(waiter);
++ }
++}
++
++#else
++
++static void ww_mutex_account_lock(struct rt_mutex *lock,
++ struct ww_acquire_ctx *ww_ctx)
++{
++ BUG();
++}
++#endif
++
+ /*
+ * Slow path lock function:
+ */
+ static int __sched
+ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk)
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx)
+ {
+ struct rt_mutex_waiter waiter;
+ unsigned long flags;
+ int ret = 0;
+
+- debug_rt_mutex_init_waiter(&waiter);
+- RB_CLEAR_NODE(&waiter.pi_tree_entry);
+- RB_CLEAR_NODE(&waiter.tree_entry);
++ rt_mutex_init_waiter(&waiter, false);
+
+ /*
+ * Technically we could use raw_spin_[un]lock_irq() here, but this can
+@@ -1187,6 +1765,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock, current, NULL)) {
++ if (ww_ctx)
++ ww_mutex_account_lock(lock, ww_ctx);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ return 0;
+ }
+@@ -1201,13 +1781,23 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+
+ if (likely(!ret))
+ /* sleep on the mutex */
+- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
++ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
++ ww_ctx);
++ else if (ww_ctx) {
++ /* ww_mutex received EDEADLK, let it become EALREADY */
++ ret = __mutex_lock_check_stamp(lock, ww_ctx);
++ BUG_ON(!ret);
++ }
+
+ if (unlikely(ret)) {
+ __set_current_state(TASK_RUNNING);
+ if (rt_mutex_has_waiters(lock))
+ remove_waiter(lock, &waiter);
+- rt_mutex_handle_deadlock(ret, chwalk, &waiter);
++		/* ww_mutex wants to report EDEADLK/EALREADY, let it */
++ if (!ww_ctx)
++ rt_mutex_handle_deadlock(ret, chwalk, &waiter);
++ } else if (ww_ctx) {
++ ww_mutex_account_lock(lock, ww_ctx);
+ }
+
+ /*
+@@ -1267,7 +1857,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
+ * Return whether the current task needs to undo a potential priority boosting.
+ */
+ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+- struct wake_q_head *wake_q)
++ struct wake_q_head *wake_q,
++ struct wake_q_head *wake_sleeper_q)
+ {
+ unsigned long flags;
+
+@@ -1323,7 +1914,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+ *
+ * Queue the next waiter for wakeup once we release the wait_lock.
+ */
+- mark_wakeup_next_waiter(wake_q, lock);
++ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
+
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+@@ -1339,31 +1930,36 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+ */
+ static inline int
+ rt_mutex_fastlock(struct rt_mutex *lock, int state,
++ struct ww_acquire_ctx *ww_ctx,
+ int (*slowfn)(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk))
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx))
+ {
+ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+ rt_mutex_deadlock_account_lock(lock, current);
+ return 0;
+ } else
+- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
++ ww_ctx);
+ }
+
+ static inline int
+ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx,
+ int (*slowfn)(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk))
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx))
+ {
+ if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+ likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+ rt_mutex_deadlock_account_lock(lock, current);
+ return 0;
+ } else
+- return slowfn(lock, state, timeout, chwalk);
++ return slowfn(lock, state, timeout, chwalk, ww_ctx);
+ }
+
+ static inline int
+@@ -1380,17 +1976,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
+ static inline void
+ rt_mutex_fastunlock(struct rt_mutex *lock,
+ bool (*slowfn)(struct rt_mutex *lock,
+- struct wake_q_head *wqh))
++ struct wake_q_head *wqh,
++ struct wake_q_head *wq_sleeper))
+ {
+ WAKE_Q(wake_q);
++ WAKE_Q(wake_sleeper_q);
+
+ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+ rt_mutex_deadlock_account_unlock(current);
+
+ } else {
+- bool deboost = slowfn(lock, &wake_q);
++ bool deboost = slowfn(lock, &wake_q, &wake_sleeper_q);
+
+ wake_up_q(&wake_q);
++ wake_up_q_sleeper(&wake_sleeper_q);
+
+ /* Undo pi boosting if necessary: */
+ if (deboost)
+@@ -1407,7 +2006,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
+ {
+ might_sleep();
+
+- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
++ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+
+@@ -1424,7 +2023,7 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
+ {
+ might_sleep();
+
+- return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
++ return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+@@ -1437,11 +2036,30 @@ int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
+ might_sleep();
+
+ return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+- RT_MUTEX_FULL_CHAINWALK,
++ RT_MUTEX_FULL_CHAINWALK, NULL,
+ rt_mutex_slowlock);
+ }
+
+ /**
++ * rt_mutex_lock_killable - lock a rt_mutex killable
++ *
++ * @lock: the rt_mutex to be locked
++ * @detect_deadlock: deadlock detection on/off
++ *
++ * Returns:
++ * 0 on success
++ * -EINTR when interrupted by a signal
++ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
++ */
++int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
++{
++ might_sleep();
++
++ return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
++}
++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
++
++/**
+ * rt_mutex_timed_lock - lock a rt_mutex interruptible
+ * the timeout structure is provided
+ * by the caller
+@@ -1461,6 +2079,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
+
+ return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+ RT_MUTEX_MIN_CHAINWALK,
++ NULL,
+ rt_mutex_slowlock);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+@@ -1478,7 +2097,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+ */
+ int __sched rt_mutex_trylock(struct rt_mutex *lock)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (WARN_ON_ONCE(in_irq() || in_nmi()))
++#else
+ if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
++#endif
+ return 0;
+
+ return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+@@ -1504,13 +2127,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+ * required or not.
+ */
+ bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
+- struct wake_q_head *wqh)
++ struct wake_q_head *wqh,
++ struct wake_q_head *wq_sleeper)
+ {
+ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+ rt_mutex_deadlock_account_unlock(current);
+ return false;
+ }
+- return rt_mutex_slowunlock(lock, wqh);
++ return rt_mutex_slowunlock(lock, wqh, wq_sleeper);
+ }
+
+ /**
+@@ -1543,13 +2167,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+ {
+ lock->owner = NULL;
+- raw_spin_lock_init(&lock->wait_lock);
+ lock->waiters = RB_ROOT;
+ lock->waiters_leftmost = NULL;
+
+ debug_rt_mutex_init(lock, name);
+ }
+-EXPORT_SYMBOL_GPL(__rt_mutex_init);
++EXPORT_SYMBOL(__rt_mutex_init);
+
+ /**
+ * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
+@@ -1564,7 +2187,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner)
+ {
+- __rt_mutex_init(lock, NULL);
++ rt_mutex_init(lock);
+ debug_rt_mutex_proxy_lock(lock, proxy_owner);
+ rt_mutex_set_owner(lock, proxy_owner);
+ rt_mutex_deadlock_account_lock(lock, proxy_owner);
+@@ -1612,6 +2235,35 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ return 1;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * In PREEMPT_RT there's an added race.
++	 * If the task that we are about to requeue times out,
++ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
++ * to skip this task. But right after the task sets
++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
++ * This will replace the PI_WAKEUP_INPROGRESS with the actual
++ * lock that it blocks on. We *must not* place this task
++ * on this proxy lock in that case.
++ *
++ * To prevent this race, we first take the task's pi_lock
++ * and check if it has updated its pi_blocked_on. If it has,
++ * we assume that it woke up and we return -EAGAIN.
++ * Otherwise, we set the task's pi_blocked_on to
++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
++ * it will know that we are in the process of requeuing it.
++ */
++ raw_spin_lock(&task->pi_lock);
++ if (task->pi_blocked_on) {
++ raw_spin_unlock(&task->pi_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
++ return -EAGAIN;
++ }
++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
++ raw_spin_unlock(&task->pi_lock);
++#endif
++
+ /* We enforce deadlock detection for futexes */
+ ret = task_blocks_on_rt_mutex(lock, waiter, task,
+ RT_MUTEX_FULL_CHAINWALK);
+@@ -1626,7 +2278,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ ret = 0;
+ }
+
+- if (unlikely(ret))
++ if (ret && rt_mutex_has_waiters(lock))
+ remove_waiter(lock, waiter);
+
+ raw_spin_unlock_irq(&lock->wait_lock);
+@@ -1682,7 +2334,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* sleep on the mutex */
+- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
++ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
+
+ if (unlikely(ret))
+ remove_waiter(lock, waiter);
+@@ -1697,3 +2349,89 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+
+ return ret;
+ }
++
++static inline int
++ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
++ unsigned tmp;
++
++ if (ctx->deadlock_inject_countdown-- == 0) {
++ tmp = ctx->deadlock_inject_interval;
++ if (tmp > UINT_MAX/4)
++ tmp = UINT_MAX;
++ else
++ tmp = tmp*2 + tmp + tmp/2;
++
++ ctx->deadlock_inject_interval = tmp;
++ ctx->deadlock_inject_countdown = tmp;
++ ctx->contending_lock = lock;
++
++ ww_mutex_unlock(lock);
++
++ return -EDEADLK;
++ }
++#endif
++
++ return 0;
++}
++
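++/*
++ * Editor's note (not part of the original patch): the interval update
++ * above grows by 3.5x each trigger (tmp*2 + tmp + tmp/2), saturating
++ * at UINT_MAX, so injected -EDEADLK returns become exponentially
++ * rarer. A hedged sketch of just that back-off step:
++ */
++#include <limits.h>
++
++static unsigned int next_inject_interval_sk(unsigned int cur)
++{
++	if (cur > UINT_MAX / 4)		/* 3.5x would overflow: saturate */
++		return UINT_MAX;
++	return cur * 2 + cur + cur / 2;	/* grow by 3.5x */
++}
++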
++#ifdef CONFIG_PREEMPT_RT_FULL
++int __sched
++__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++{
++ int ret;
++
++ might_sleep();
++
++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
++ if (ret)
++ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
++ else if (!ret && ww_ctx->acquired > 1)
++ return ww_mutex_deadlock_injection(lock, ww_ctx);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
++
++int __sched
++__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++{
++ int ret;
++
++ might_sleep();
++
++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
++ if (ret)
++ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
++ else if (!ret && ww_ctx->acquired > 1)
++ return ww_mutex_deadlock_injection(lock, ww_ctx);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(__ww_mutex_lock);
++
++void __sched ww_mutex_unlock(struct ww_mutex *lock)
++{
++ int nest = !!lock->ctx;
++
++ /*
++ * The unlocking fastpath is the 0->1 transition from 'locked'
++ * into 'unlocked' state:
++ */
++ if (nest) {
++#ifdef CONFIG_DEBUG_MUTEXES
++ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
++#endif
++ if (lock->ctx->acquired > 0)
++ lock->ctx->acquired--;
++ lock->ctx = NULL;
++ }
++
++ mutex_release(&lock->base.dep_map, nest, _RET_IP_);
++ rt_mutex_unlock(&lock->base.lock);
++}
++EXPORT_SYMBOL(ww_mutex_unlock);
++#endif
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 4f5f83c7d2d3..289f062f26cd 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -27,6 +27,7 @@ struct rt_mutex_waiter {
+ struct rb_node pi_tree_entry;
+ struct task_struct *task;
+ struct rt_mutex *lock;
++ bool savestate;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ unsigned long ip;
+ struct pid *deadlock_task_pid;
+@@ -97,6 +98,9 @@ enum rtmutex_chainwalk {
+ /*
+ * PI-futex support (proxy locking functions, etc.):
+ */
++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
++
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+@@ -110,7 +114,8 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter);
+ extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
+ extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
+- struct wake_q_head *wqh);
++ struct wake_q_head *wqh,
++ struct wake_q_head *wq_sleeper);
+ extern void rt_mutex_adjust_prio(struct task_struct *task);
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+@@ -119,4 +124,14 @@ extern void rt_mutex_adjust_prio(struct task_struct *task);
+ # include "rtmutex.h"
+ #endif
+
++static inline void
++rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
++{
++ debug_rt_mutex_init_waiter(waiter);
++ waiter->task = NULL;
++ waiter->savestate = savestate;
++ RB_CLEAR_NODE(&waiter->pi_tree_entry);
++ RB_CLEAR_NODE(&waiter->tree_entry);
++}
++
+ #endif
+diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
+index db3ccb1dd614..909779647bd1 100644
+--- a/kernel/locking/spinlock.c
++++ b/kernel/locking/spinlock.c
+@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
+ * __[spin|read|write]_lock_bh()
+ */
+ BUILD_LOCK_OPS(spin, raw_spinlock);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ BUILD_LOCK_OPS(read, rwlock);
+ BUILD_LOCK_OPS(write, rwlock);
++#endif
+
+ #endif
+
+@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
+ EXPORT_SYMBOL(_raw_spin_unlock_bh);
+ #endif
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #ifndef CONFIG_INLINE_READ_TRYLOCK
+ int __lockfunc _raw_read_trylock(rwlock_t *lock)
+ {
+@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
+ EXPORT_SYMBOL(_raw_write_unlock_bh);
+ #endif
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
+index 0374a596cffa..94970338d518 100644
+--- a/kernel/locking/spinlock_debug.c
++++ b/kernel/locking/spinlock_debug.c
+@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+
+ EXPORT_SYMBOL(__raw_spin_lock_init);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key)
+ {
+@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
+ }
+
+ EXPORT_SYMBOL(__rwlock_init);
++#endif
+
+ static void spin_dump(raw_spinlock_t *lock, const char *msg)
+ {
+@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
+ arch_spin_unlock(&lock->raw_lock);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static void rwlock_bug(rwlock_t *lock, const char *msg)
+ {
+ if (!debug_locks_off())
+@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+ debug_write_unlock(lock);
+ arch_write_unlock(&lock->raw_lock);
+ }
++
++#endif
+diff --git a/kernel/panic.c b/kernel/panic.c
+index ca8cea1ef673..6b698115f003 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -449,9 +449,11 @@ static u64 oops_id;
+
+ static int init_oops_id(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!oops_id)
+ get_random_bytes(&oops_id, sizeof(oops_id));
+ else
++#endif
+ oops_id++;
+
+ return 0;
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 33c79b6105c5..f53375bc77df 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -286,6 +286,8 @@ static int create_image(int platform_mode)
+
+ local_irq_disable();
+
++ system_state = SYSTEM_SUSPEND;
++
+ error = syscore_suspend();
+ if (error) {
+ printk(KERN_ERR "PM: Some system devices failed to power down, "
+@@ -315,6 +317,7 @@ static int create_image(int platform_mode)
+ syscore_resume();
+
+ Enable_irqs:
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+
+ Enable_cpus:
+@@ -444,6 +447,7 @@ static int resume_target_kernel(bool platform_mode)
+ goto Enable_cpus;
+
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
+
+ error = syscore_suspend();
+ if (error)
+@@ -477,6 +481,7 @@ static int resume_target_kernel(bool platform_mode)
+ syscore_resume();
+
+ Enable_irqs:
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+
+ Enable_cpus:
+@@ -562,6 +567,7 @@ int hibernation_platform_enter(void)
+ goto Enable_cpus;
+
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
+ syscore_suspend();
+ if (pm_wakeup_pending()) {
+ error = -EAGAIN;
+@@ -574,6 +580,7 @@ int hibernation_platform_enter(void)
+
+ Power_up:
+ syscore_resume();
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+
+ Enable_cpus:
+@@ -674,6 +681,10 @@ static int load_image_and_restore(void)
+ return error;
+ }
+
++#ifndef CONFIG_SUSPEND
++bool pm_in_action;
++#endif
++
+ /**
+ * hibernate - Carry out system hibernation, including saving the image.
+ */
+@@ -687,6 +698,8 @@ int hibernate(void)
+ return -EPERM;
+ }
+
++ pm_in_action = true;
++
+ lock_system_sleep();
+ /* The snapshot device should not be opened while we're running */
+ if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
+@@ -764,6 +777,7 @@ int hibernate(void)
+ atomic_inc(&snapshot_device_available);
+ Unlock:
+ unlock_system_sleep();
++ pm_in_action = false;
+ return error;
+ }
+
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 0acab9d7f96f..aac06aad757c 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -361,6 +361,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
+ arch_suspend_disable_irqs();
+ BUG_ON(!irqs_disabled());
+
++ system_state = SYSTEM_SUSPEND;
++
+ error = syscore_suspend();
+ if (!error) {
+ *wakeup = pm_wakeup_pending();
+@@ -377,6 +379,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
+ syscore_resume();
+ }
+
++ system_state = SYSTEM_RUNNING;
++
+ arch_suspend_enable_irqs();
+ BUG_ON(irqs_disabled());
+
+@@ -519,6 +523,8 @@ static int enter_state(suspend_state_t state)
+ return error;
+ }
+
++bool pm_in_action;
++
+ /**
+ * pm_suspend - Externally visible function for suspending the system.
+ * @state: System sleep state to enter.
+@@ -533,6 +539,8 @@ int pm_suspend(suspend_state_t state)
+ if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
+ return -EINVAL;
+
++ pm_in_action = true;
++
+ error = enter_state(state);
+ if (error) {
+ suspend_stats.fail++;
+@@ -540,6 +548,7 @@ int pm_suspend(suspend_state_t state)
+ } else {
+ suspend_stats.success++;
+ }
++ pm_in_action = false;
+ return error;
+ }
+ EXPORT_SYMBOL(pm_suspend);
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index eea6dbc2d8cf..6f01c7ecb45e 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -351,6 +351,65 @@ __packed __aligned(4)
+ */
+ DEFINE_RAW_SPINLOCK(logbuf_lock);
+
++#ifdef CONFIG_EARLY_PRINTK
++struct console *early_console;
++
++static void early_vprintk(const char *fmt, va_list ap)
++{
++ if (early_console) {
++ char buf[512];
++ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
++
++ early_console->write(early_console, buf, n);
++ }
++}
++
++asmlinkage void early_printk(const char *fmt, ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ early_vprintk(fmt, ap);
++ va_end(ap);
++}
++
++/*
++ * This is independent of any log levels - a global
++ * kill switch that turns off all of printk.
++ *
++ * Used by the NMI watchdog if early-printk is enabled.
++ */
++static bool __read_mostly printk_killswitch;
++
++static int __init force_early_printk_setup(char *str)
++{
++ printk_killswitch = true;
++ return 0;
++}
++early_param("force_early_printk", force_early_printk_setup);
++
++void printk_kill(void)
++{
++ printk_killswitch = true;
++}
++
++#ifdef CONFIG_PRINTK
++static int forced_early_printk(const char *fmt, va_list ap)
++{
++ if (!printk_killswitch)
++ return 0;
++ early_vprintk(fmt, ap);
++ return 1;
++}
++#endif
++
++#else
++static inline int forced_early_printk(const char *fmt, va_list ap)
++{
++ return 0;
++}
++#endif
++
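++/*
++ * Editor's sketch (not part of the original patch): the killswitch
++ * fallback pattern above in plain C. A global flag diverts formatted
++ * output to a trivial synchronous sink once the normal path has been
++ * declared unsafe, so a debugger can keep printing. vfprintf(stderr)
++ * stands in for early_console->write(); names are illustrative.
++ */
++#include <stdarg.h>
++#include <stdbool.h>
++#include <stdio.h>
++
++static bool killswitch_sk;
++
++/* Returns 1 if the message was fully handled by the raw sink. */
++static int forced_raw_output_sk(const char *fmt, va_list ap)
++{
++	if (!killswitch_sk)
++		return 0;		/* normal logging path continues */
++	vfprintf(stderr, fmt, ap);
++	return 1;
++}
++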
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+ /* the next printk record to read by syslog(READ) or /proc/kmsg */
+@@ -1340,6 +1399,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ {
+ char *text;
+ int len = 0;
++ int attempts = 0;
+
+ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
+ if (!text)
+@@ -1351,6 +1411,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ u64 seq;
+ u32 idx;
+ enum log_flags prev;
++ int num_msg;
++try_again:
++ attempts++;
++ if (attempts > 10) {
++ len = -EBUSY;
++ goto out;
++ }
++ num_msg = 0;
+
+ /*
+ * Find first record that fits, including all following records,
+@@ -1366,6 +1434,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ prev = msg->flags;
+ idx = log_next(idx);
+ seq++;
++ num_msg++;
++ if (num_msg > 5) {
++ num_msg = 0;
++ raw_spin_unlock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
++ if (clear_seq < log_first_seq)
++ goto try_again;
++ }
+ }
+
+ /* move first record forward until length fits into the buffer */
+@@ -1379,6 +1455,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ prev = msg->flags;
+ idx = log_next(idx);
+ seq++;
++ num_msg++;
++ if (num_msg > 5) {
++ num_msg = 0;
++ raw_spin_unlock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
++ if (clear_seq < log_first_seq)
++ goto try_again;
++ }
+ }
+
+ /* last message fitting into this dump */
+@@ -1419,6 +1503,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ clear_seq = log_next_seq;
+ clear_idx = log_next_idx;
+ }
++out:
+ raw_spin_unlock_irq(&logbuf_lock);
+
+ kfree(text);
+@@ -1572,6 +1657,12 @@ static void call_console_drivers(int level,
+ if (!console_drivers)
+ return;
+
++ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
++ if (in_irq() || in_nmi())
++ return;
++ }
++
++ migrate_disable();
+ for_each_console(con) {
+ if (exclusive_console && con != exclusive_console)
+ continue;
+@@ -1587,6 +1678,7 @@ static void call_console_drivers(int level,
+ else
+ con->write(con, text, len);
+ }
++ migrate_enable();
+ }
+
+ /*
+@@ -1750,6 +1842,13 @@ asmlinkage int vprintk_emit(int facility, int level,
+ /* cpu currently holding logbuf_lock in this function */
+ static unsigned int logbuf_cpu = UINT_MAX;
+
++ /*
++ * Fall back to early_printk if a debugging subsystem has
++ * killed printk output
++ */
++ if (unlikely(forced_early_printk(fmt, args)))
++ return 1;
++
+ if (level == LOGLEVEL_SCHED) {
+ level = LOGLEVEL_DEFAULT;
+ in_sched = true;
+@@ -1894,13 +1993,23 @@ asmlinkage int vprintk_emit(int facility, int level,
+
+ /* If called from the scheduler, we can not call up(). */
+ if (!in_sched) {
++ int may_trylock = 1;
++
+ lockdep_off();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++		 * we can't take a sleeping lock with IRQs or preemption
++		 * disabled, so we can't print in these contexts
++ */
++ if (!(preempt_count() == 0 && !irqs_disabled()))
++ may_trylock = 0;
++#endif
+ /*
+ * Try to acquire and then immediately release the console
+ * semaphore. The release will print out buffers and wake up
+ * /dev/kmsg and syslog() users.
+ */
+- if (console_trylock())
++ if (may_trylock && console_trylock())
+ console_unlock();
+ lockdep_on();
+ }
+@@ -2023,26 +2132,6 @@ DEFINE_PER_CPU(printk_func_t, printk_func);
+
+ #endif /* CONFIG_PRINTK */
+
+-#ifdef CONFIG_EARLY_PRINTK
+-struct console *early_console;
+-
+-asmlinkage __visible void early_printk(const char *fmt, ...)
+-{
+- va_list ap;
+- char buf[512];
+- int n;
+-
+- if (!early_console)
+- return;
+-
+- va_start(ap, fmt);
+- n = vscnprintf(buf, sizeof(buf), fmt, ap);
+- va_end(ap);
+-
+- early_console->write(early_console, buf, n);
+-}
+-#endif
+-
+ static int __add_preferred_console(char *name, int idx, char *options,
+ char *brl_options)
+ {
+@@ -2312,11 +2401,16 @@ static void console_cont_flush(char *text, size_t size)
+ goto out;
+
+ len = cont_print_text(text, size);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(cont.level, NULL, 0, text, len);
++#else
+ raw_spin_unlock(&logbuf_lock);
+ stop_critical_timings();
+ call_console_drivers(cont.level, NULL, 0, text, len);
+ start_critical_timings();
+ local_irq_restore(flags);
++#endif
+ return;
+ out:
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+@@ -2440,13 +2534,17 @@ void console_unlock(void)
+ console_idx = log_next(console_idx);
+ console_seq++;
+ console_prev = msg->flags;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(level, ext_text, ext_len, text, len);
++#else
+ raw_spin_unlock(&logbuf_lock);
+
+ stop_critical_timings(); /* don't trace print latency */
+ call_console_drivers(level, ext_text, ext_len, text, len);
+ start_critical_timings();
+ local_irq_restore(flags);
+-
++#endif
+ if (do_cond_resched)
+ cond_resched();
+ }
+@@ -2498,6 +2596,11 @@ void console_unblank(void)
+ {
+ struct console *c;
+
++ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
++ if (in_irq() || in_nmi())
++ return;
++ }
++
+ /*
+ * console_unblank can no longer be called in interrupt context unless
+ * oops_in_progress is set to 1..
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 1d3b7665d0be..ce666639789d 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -128,7 +128,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
+
+ spin_lock_irq(&task->sighand->siglock);
+ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+- task->state = __TASK_TRACED;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&task->pi_lock, flags);
++ if (task->state & __TASK_TRACED)
++ task->state = __TASK_TRACED;
++ else
++ task->saved_state = __TASK_TRACED;
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ ret = true;
+ }
+ spin_unlock_irq(&task->sighand->siglock);
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index 971e2b138063..a304670fb917 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -404,6 +404,7 @@ static struct rcu_torture_ops rcu_ops = {
+ .name = "rcu"
+ };
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Definitions for rcu_bh torture testing.
+ */
+@@ -443,6 +444,12 @@ static struct rcu_torture_ops rcu_bh_ops = {
+ .name = "rcu_bh"
+ };
+
++#else
++static struct rcu_torture_ops rcu_bh_ops = {
++ .ttype = INVALID_RCU_FLAVOR,
++};
++#endif
++
+ /*
+ * Don't even think about trying any of these in real life!!!
+ * The names includes "busted", and they really means it!
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 5d80925e7fc8..2b4bc2b2c25a 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -56,6 +56,11 @@
+ #include <linux/random.h>
+ #include <linux/trace_events.h>
+ #include <linux/suspend.h>
++#include <linux/delay.h>
++#include <linux/gfp.h>
++#include <linux/oom.h>
++#include <linux/smpboot.h>
++#include "../time/tick-internal.h"
+
+ #include "tree.h"
+ #include "rcu.h"
+@@ -259,6 +264,19 @@ void rcu_sched_qs(void)
+ this_cpu_ptr(&rcu_sched_data), true);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void rcu_preempt_qs(void);
++
++void rcu_bh_qs(void)
++{
++ unsigned long flags;
++
++ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */
++ local_irq_save(flags);
++ rcu_preempt_qs();
++ local_irq_restore(flags);
++}
++#else
+ void rcu_bh_qs(void)
+ {
+ if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
+@@ -268,6 +286,7 @@ void rcu_bh_qs(void)
+ __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
+ }
+ }
++#endif
+
+ static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
+
+@@ -448,11 +467,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+ /*
+ * Return the number of RCU BH batches started thus far for debug & stats.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ unsigned long rcu_batches_started_bh(void)
+ {
+ return rcu_bh_state.gpnum;
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
++#endif
+
+ /*
+ * Return the number of RCU batches completed thus far for debug & stats.
+@@ -472,6 +493,7 @@ unsigned long rcu_batches_completed_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Return the number of RCU BH batches completed thus far for debug & stats.
+ */
+@@ -480,6 +502,7 @@ unsigned long rcu_batches_completed_bh(void)
+ return rcu_bh_state.completed;
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
++#endif
+
+ /*
+ * Return the number of RCU expedited batches completed thus far for
+@@ -503,6 +526,7 @@ unsigned long rcu_exp_batches_completed_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Force a quiescent state.
+ */
+@@ -521,6 +545,13 @@ void rcu_bh_force_quiescent_state(void)
+ }
+ EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
+
++#else
++void rcu_force_quiescent_state(void)
++{
++}
++EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
++#endif
++
+ /*
+ * Force a quiescent state for RCU-sched.
+ */
+@@ -571,9 +602,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
+ case RCU_FLAVOR:
+ rsp = rcu_state_p;
+ break;
++#ifndef CONFIG_PREEMPT_RT_FULL
+ case RCU_BH_FLAVOR:
+ rsp = &rcu_bh_state;
+ break;
++#endif
+ case RCU_SCHED_FLAVOR:
+ rsp = &rcu_sched_state;
+ break;
+@@ -3013,18 +3046,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
+ /*
+ * Do RCU core processing for the current CPU.
+ */
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static void rcu_process_callbacks(void)
+ {
+ struct rcu_state *rsp;
+
+ if (cpu_is_offline(smp_processor_id()))
+ return;
+- trace_rcu_utilization(TPS("Start RCU core"));
+ for_each_rcu_flavor(rsp)
+ __rcu_process_callbacks(rsp);
+- trace_rcu_utilization(TPS("End RCU core"));
+ }
+
++static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+ /*
+ * Schedule RCU callback invocation. If the specified type of RCU
+ * does not support RCU priority boosting, just do a direct call,
+@@ -3036,19 +3068,106 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+ {
+ if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
+ return;
+- if (likely(!rsp->boost)) {
+- rcu_do_batch(rsp, rdp);
+- return;
+- }
+- invoke_rcu_callbacks_kthread();
++ rcu_do_batch(rsp, rdp);
+ }
+
++static void rcu_wake_cond(struct task_struct *t, int status)
++{
++ /*
++ * If the thread is yielding, only wake it when this
++ * is invoked from idle
++ */
++ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
++ wake_up_process(t);
++}
++
++/*
++ * Wake up this CPU's rcuc kthread to do RCU core processing.
++ */
+ static void invoke_rcu_core(void)
+ {
+- if (cpu_online(smp_processor_id()))
+- raise_softirq(RCU_SOFTIRQ);
++ unsigned long flags;
++ struct task_struct *t;
++
++ if (!cpu_online(smp_processor_id()))
++ return;
++ local_irq_save(flags);
++ __this_cpu_write(rcu_cpu_has_work, 1);
++ t = __this_cpu_read(rcu_cpu_kthread_task);
++ if (t != NULL && current != t)
++ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
++ local_irq_restore(flags);
+ }
+
++static void rcu_cpu_kthread_park(unsigned int cpu)
++{
++ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
++}
++
++static int rcu_cpu_kthread_should_run(unsigned int cpu)
++{
++ return __this_cpu_read(rcu_cpu_has_work);
++}
++
++/*
++ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
++ * RCU softirq used in flavors and configurations of RCU that do not
++ * support RCU priority boosting.
++ */
++static void rcu_cpu_kthread(unsigned int cpu)
++{
++ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
++ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
++ int spincnt;
++
++ for (spincnt = 0; spincnt < 10; spincnt++) {
++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
++ local_bh_disable();
++ *statusp = RCU_KTHREAD_RUNNING;
++ this_cpu_inc(rcu_cpu_kthread_loops);
++ local_irq_disable();
++ work = *workp;
++ *workp = 0;
++ local_irq_enable();
++ if (work)
++ rcu_process_callbacks();
++ local_bh_enable();
++ if (*workp == 0) {
++ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
++ *statusp = RCU_KTHREAD_WAITING;
++ return;
++ }
++ }
++ *statusp = RCU_KTHREAD_YIELDING;
++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
++ schedule_timeout_interruptible(2);
++ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
++ *statusp = RCU_KTHREAD_WAITING;
++}
++
++static struct smp_hotplug_thread rcu_cpu_thread_spec = {
++ .store = &rcu_cpu_kthread_task,
++ .thread_should_run = rcu_cpu_kthread_should_run,
++ .thread_fn = rcu_cpu_kthread,
++ .thread_comm = "rcuc/%u",
++ .setup = rcu_cpu_kthread_setup,
++ .park = rcu_cpu_kthread_park,
++};
++
++/*
++ * Spawn per-CPU RCU core processing kthreads.
++ */
++static int __init rcu_spawn_core_kthreads(void)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu)
++ per_cpu(rcu_cpu_has_work, cpu) = 0;
++ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
++ return 0;
++}
++early_initcall(rcu_spawn_core_kthreads);
++
+ /*
+ * Handle any core-RCU processing required by a call_rcu() invocation.
+ */
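The rcuc kthread above drains pending work and re-checks up to ten times before yielding for a couple of ticks, so callbacks get batched without the thread monopolizing the CPU. A rough userspace analogue of that spin-then-yield shape, with POSIX threads standing in for the smpboot machinery (names hypothetical):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int has_work = 1;

    static void process_batch(void) { puts("processing one callback batch"); }

    /* Drain pending work; after ten passes, yield instead of spinning on. */
    static void *rcuc_like(void *arg)
    {
        (void)arg;
        for (int spincnt = 0; spincnt < 10; spincnt++) {
            int work = atomic_exchange(&has_work, 0); /* work = *workp; *workp = 0 */
            if (work)
                process_batch();
            if (!atomic_load(&has_work))
                return NULL;                /* RCU_KTHREAD_WAITING */
        }
        usleep(2000);                       /* RCU_KTHREAD_YIELDING */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, rcuc_like, NULL);
        pthread_join(t, NULL);
        return 0;
    }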
+@@ -3192,6 +3311,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Queue an RCU callback for invocation after a quicker grace period.
+ */
+@@ -3200,6 +3320,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
+ __call_rcu(head, func, &rcu_bh_state, -1, 0);
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_bh);
++#endif
+
+ /*
+ * Queue an RCU callback for lazy invocation after a grace period.
+@@ -3291,6 +3412,7 @@ void synchronize_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(synchronize_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+@@ -3317,6 +3439,7 @@ void synchronize_rcu_bh(void)
+ wait_rcu_gp(call_rcu_bh);
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
++#endif
+
+ /**
+ * get_state_synchronize_rcu - Snapshot current RCU state
+@@ -3695,6 +3818,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
+ mutex_unlock(&rsp->barrier_mutex);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+@@ -3703,6 +3827,7 @@ void rcu_barrier_bh(void)
+ _rcu_barrier(&rcu_bh_state);
+ }
+ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
++#endif
+
+ /**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+@@ -4196,12 +4321,13 @@ void __init rcu_init(void)
+
+ rcu_bootup_announce();
+ rcu_init_geometry();
++#ifndef CONFIG_PREEMPT_RT_FULL
+ rcu_init_one(&rcu_bh_state);
++#endif
+ rcu_init_one(&rcu_sched_state);
+ if (dump_tree)
+ rcu_dump_rcu_node_tree(&rcu_sched_state);
+ __rcu_init_preempt();
+- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+
+ /*
+ * We don't need protection against CPU-hotplug here because
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index f714f873bf9d..71631196e66e 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -587,18 +587,18 @@ extern struct list_head rcu_struct_flavors;
+ */
+ extern struct rcu_state rcu_sched_state;
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern struct rcu_state rcu_bh_state;
++#endif
+
+ #ifdef CONFIG_PREEMPT_RCU
+ extern struct rcu_state rcu_preempt_state;
+ #endif /* #ifdef CONFIG_PREEMPT_RCU */
+
+-#ifdef CONFIG_RCU_BOOST
+ DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+ DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
+ DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+ DECLARE_PER_CPU(char, rcu_cpu_has_work);
+-#endif /* #ifdef CONFIG_RCU_BOOST */
+
+ #ifndef RCU_TREE_NONCORE
+
+@@ -618,10 +618,9 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
+ static void __init __rcu_init_preempt(void);
+ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
+ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+-static void invoke_rcu_callbacks_kthread(void);
+ static bool rcu_is_callbacks_kthread(void);
++static void rcu_cpu_kthread_setup(unsigned int cpu);
+ #ifdef CONFIG_RCU_BOOST
+-static void rcu_preempt_do_callbacks(void);
+ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+ struct rcu_node *rnp);
+ #endif /* #ifdef CONFIG_RCU_BOOST */
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 0082fce402a0..e08cddadd9c7 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -24,25 +24,10 @@
+ * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+-#include <linux/delay.h>
+-#include <linux/gfp.h>
+-#include <linux/oom.h>
+-#include <linux/smpboot.h>
+-#include "../time/tick-internal.h"
+-
+ #ifdef CONFIG_RCU_BOOST
+
+ #include "../locking/rtmutex_common.h"
+
+-/*
+- * Control variables for per-CPU and per-rcu_node kthreads. These
+- * handle all flavors of RCU.
+- */
+-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+-DEFINE_PER_CPU(char, rcu_cpu_has_work);
+-
+ #else /* #ifdef CONFIG_RCU_BOOST */
+
+ /*
+@@ -55,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
+
+ #endif /* #else #ifdef CONFIG_RCU_BOOST */
+
++/*
++ * Control variables for per-CPU and per-rcu_node kthreads. These
++ * handle all flavors of RCU.
++ */
++DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
++DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
++DEFINE_PER_CPU(char, rcu_cpu_has_work);
++
+ #ifdef CONFIG_RCU_NOCB_CPU
+ static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
+ static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
+@@ -426,7 +419,7 @@ void rcu_read_unlock_special(struct task_struct *t)
+ }
+
+ /* Hardware IRQ handlers cannot block, complain if they get here. */
+- if (in_irq() || in_serving_softirq()) {
++ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
+ lockdep_rcu_suspicious(__FILE__, __LINE__,
+ "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
+ pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
+@@ -632,15 +625,6 @@ static void rcu_preempt_check_callbacks(void)
+ t->rcu_read_unlock_special.b.need_qs = true;
+ }
+
+-#ifdef CONFIG_RCU_BOOST
+-
+-static void rcu_preempt_do_callbacks(void)
+-{
+- rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
+-}
+-
+-#endif /* #ifdef CONFIG_RCU_BOOST */
+-
+ /*
+ * Queue a preemptible-RCU callback for invocation after a grace period.
+ */
+@@ -829,6 +813,19 @@ void exit_rcu(void)
+
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
++/*
++ * If boosting, set rcuc kthreads to realtime priority.
++ */
++static void rcu_cpu_kthread_setup(unsigned int cpu)
++{
++#ifdef CONFIG_RCU_BOOST
++ struct sched_param sp;
++
++ sp.sched_priority = kthread_prio;
++ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
++#endif /* #ifdef CONFIG_RCU_BOOST */
++}
++
+ #ifdef CONFIG_RCU_BOOST
+
+ #include "../locking/rtmutex_common.h"
+@@ -860,16 +857,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
+
+ #endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+-static void rcu_wake_cond(struct task_struct *t, int status)
+-{
+- /*
+- * If the thread is yielding, only wake it when this
+- * is invoked from idle
+- */
+- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
+- wake_up_process(t);
+-}
+-
+ /*
+ * Carry out RCU priority boosting on the task indicated by ->exp_tasks
+ * or ->boost_tasks, advancing the pointer to the next task in the
+@@ -1013,23 +1000,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
+ }
+
+ /*
+- * Wake up the per-CPU kthread to invoke RCU callbacks.
+- */
+-static void invoke_rcu_callbacks_kthread(void)
+-{
+- unsigned long flags;
+-
+- local_irq_save(flags);
+- __this_cpu_write(rcu_cpu_has_work, 1);
+- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+- current != __this_cpu_read(rcu_cpu_kthread_task)) {
+- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
+- __this_cpu_read(rcu_cpu_kthread_status));
+- }
+- local_irq_restore(flags);
+-}
+-
+-/*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+@@ -1083,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+ return 0;
+ }
+
+-static void rcu_kthread_do_work(void)
+-{
+- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
+- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
+- rcu_preempt_do_callbacks();
+-}
+-
+-static void rcu_cpu_kthread_setup(unsigned int cpu)
+-{
+- struct sched_param sp;
+-
+- sp.sched_priority = kthread_prio;
+- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+-}
+-
+-static void rcu_cpu_kthread_park(unsigned int cpu)
+-{
+- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+-}
+-
+-static int rcu_cpu_kthread_should_run(unsigned int cpu)
+-{
+- return __this_cpu_read(rcu_cpu_has_work);
+-}
+-
+-/*
+- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+- * RCU softirq used in flavors and configurations of RCU that do not
+- * support RCU priority boosting.
+- */
+-static void rcu_cpu_kthread(unsigned int cpu)
+-{
+- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
+- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
+- int spincnt;
+-
+- for (spincnt = 0; spincnt < 10; spincnt++) {
+- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
+- local_bh_disable();
+- *statusp = RCU_KTHREAD_RUNNING;
+- this_cpu_inc(rcu_cpu_kthread_loops);
+- local_irq_disable();
+- work = *workp;
+- *workp = 0;
+- local_irq_enable();
+- if (work)
+- rcu_kthread_do_work();
+- local_bh_enable();
+- if (*workp == 0) {
+- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
+- *statusp = RCU_KTHREAD_WAITING;
+- return;
+- }
+- }
+- *statusp = RCU_KTHREAD_YIELDING;
+- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
+- schedule_timeout_interruptible(2);
+- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
+- *statusp = RCU_KTHREAD_WAITING;
+-}
+-
+ /*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question. The CPU hotplug lock is still
+@@ -1174,26 +1083,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+ free_cpumask_var(cm);
+ }
+
+-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+- .store = &rcu_cpu_kthread_task,
+- .thread_should_run = rcu_cpu_kthread_should_run,
+- .thread_fn = rcu_cpu_kthread,
+- .thread_comm = "rcuc/%u",
+- .setup = rcu_cpu_kthread_setup,
+- .park = rcu_cpu_kthread_park,
+-};
+-
+ /*
+ * Spawn boost kthreads -- called as soon as the scheduler is running.
+ */
+ static void __init rcu_spawn_boost_kthreads(void)
+ {
+ struct rcu_node *rnp;
+- int cpu;
+-
+- for_each_possible_cpu(cpu)
+- per_cpu(rcu_cpu_has_work, cpu) = 0;
+- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
+ rcu_for_each_leaf_node(rcu_state_p, rnp)
+ (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
+ }
+@@ -1216,11 +1111,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
+
+-static void invoke_rcu_callbacks_kthread(void)
+-{
+- WARN_ON_ONCE(1);
+-}
+-
+ static bool rcu_is_callbacks_kthread(void)
+ {
+ return false;
+@@ -1244,7 +1134,7 @@ static void rcu_prepare_kthreads(int cpu)
+
+ #endif /* #else #ifdef CONFIG_RCU_BOOST */
+
+-#if !defined(CONFIG_RCU_FAST_NO_HZ)
++#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
+
+ /*
+ * Check to see if any future RCU-related work will need to be done
+@@ -1261,7 +1151,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
+ return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
+ ? 0 : rcu_cpu_has_callbacks(NULL);
+ }
++#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
+
++#if !defined(CONFIG_RCU_FAST_NO_HZ)
+ /*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
+ * after it.
+@@ -1357,6 +1249,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
+ return cbs_ready;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ /*
+ * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
+ * to invoke. If the CPU has callbacks, try to advance them. Tell the
+@@ -1402,6 +1296,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
+ *nextevt = basemono + dj * TICK_NSEC;
+ return 0;
+ }
++#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
+
+ /*
+ * Prepare a CPU for idle from an RCU perspective. The first major task
+diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
+index f0d8322bc3ec..b40d3468ba4e 100644
+--- a/kernel/rcu/update.c
++++ b/kernel/rcu/update.c
+@@ -295,6 +295,7 @@ int rcu_read_lock_held(void)
+ }
+ EXPORT_SYMBOL_GPL(rcu_read_lock_held);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
+ *
+@@ -321,6 +322,7 @@ int rcu_read_lock_bh_held(void)
+ return in_softirq() || irqs_disabled();
+ }
+ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
++#endif
+
+ #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+diff --git a/kernel/relay.c b/kernel/relay.c
+index d797502140b9..cf05c17ddbed 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -336,6 +336,10 @@ static void wakeup_readers(unsigned long data)
+ {
+ struct rchan_buf *buf = (struct rchan_buf *)data;
+ wake_up_interruptible(&buf->read_wait);
++ /*
++ * Stupid polling for now:
++ */
++ mod_timer(&buf->timer, jiffies + 1);
+ }
+
+ /**
+@@ -353,6 +357,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
+ init_waitqueue_head(&buf->read_wait);
+ kref_init(&buf->kref);
+ setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
++ mod_timer(&buf->timer, jiffies + 1);
+ } else
+ del_timer_sync(&buf->timer);
+
+@@ -767,15 +772,6 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
+ else
+ buf->early_bytes += buf->chan->subbuf_size -
+ buf->padding[old_subbuf];
+- smp_mb();
+- if (waitqueue_active(&buf->read_wait))
+- /*
+- * Calling wake_up_interruptible() from here
+- * will deadlock if we happen to be logging
+- * from the scheduler (trying to re-grab
+- * rq->lock), so defer it.
+- */
+- mod_timer(&buf->timer, jiffies + 1);
+ }
+
+ old = buf->data;
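With the wakeup gone from relay_switch_subbuf(), which can run under rq->lock, readers are now woken by a timer that unconditionally re-arms itself one jiffy out. A tiny userspace model of that self-rearming poll (names hypothetical):

    #include <stdio.h>
    #include <unistd.h>

    static void wake_readers(void)
    {
        puts("wake_up_interruptible(&buf->read_wait)");
    }

    int main(void)
    {
        /* Like wakeup_readers() plus mod_timer(&buf->timer, jiffies + 1):
         * wake whoever is waiting, then re-arm one tick later, whether or
         * not data actually arrived ("stupid polling for now"). */
        for (int jiffy = 0; jiffy < 5; jiffy++) {
            wake_readers();
            usleep(1000);   /* one "jiffy" */
        }
        return 0;
    }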
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 5e59b832ae2b..7337a7f60e3f 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -17,7 +17,7 @@ endif
+
+ obj-y += core.o loadavg.o clock.o cputime.o
+ obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
+-obj-y += wait.o swait.o completion.o idle.o
++obj-y += wait.o swait.o swork.o completion.o idle.o
+ obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+ obj-$(CONFIG_SCHEDSTATS) += stats.o
+diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
+index 8d0f35debf35..b62cf6400fe0 100644
+--- a/kernel/sched/completion.c
++++ b/kernel/sched/completion.c
+@@ -30,10 +30,10 @@ void complete(struct completion *x)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&x->wait.lock, flags);
++ raw_spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+- __wake_up_locked(&x->wait, TASK_NORMAL, 1);
+- spin_unlock_irqrestore(&x->wait.lock, flags);
++ swake_up_locked(&x->wait);
++ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ EXPORT_SYMBOL(complete);
+
+@@ -50,10 +50,10 @@ void complete_all(struct completion *x)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&x->wait.lock, flags);
++ raw_spin_lock_irqsave(&x->wait.lock, flags);
+ x->done += UINT_MAX/2;
+- __wake_up_locked(&x->wait, TASK_NORMAL, 0);
+- spin_unlock_irqrestore(&x->wait.lock, flags);
++ swake_up_all_locked(&x->wait);
++ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ EXPORT_SYMBOL(complete_all);
+
+@@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x,
+ long (*action)(long), long timeout, int state)
+ {
+ if (!x->done) {
+- DECLARE_WAITQUEUE(wait, current);
++ DECLARE_SWAITQUEUE(wait);
+
+- __add_wait_queue_tail_exclusive(&x->wait, &wait);
++ __prepare_to_swait(&x->wait, &wait);
+ do {
+ if (signal_pending_state(state, current)) {
+ timeout = -ERESTARTSYS;
+ break;
+ }
+ __set_current_state(state);
+- spin_unlock_irq(&x->wait.lock);
++ raw_spin_unlock_irq(&x->wait.lock);
+ timeout = action(timeout);
+- spin_lock_irq(&x->wait.lock);
++ raw_spin_lock_irq(&x->wait.lock);
+ } while (!x->done && timeout);
+- __remove_wait_queue(&x->wait, &wait);
++ __finish_swait(&x->wait, &wait);
+ if (!x->done)
+ return timeout;
+ }
+@@ -89,9 +89,9 @@ __wait_for_common(struct completion *x,
+ {
+ might_sleep();
+
+- spin_lock_irq(&x->wait.lock);
++ raw_spin_lock_irq(&x->wait.lock);
+ timeout = do_wait_for_common(x, action, timeout, state);
+- spin_unlock_irq(&x->wait.lock);
++ raw_spin_unlock_irq(&x->wait.lock);
+ return timeout;
+ }
+
+@@ -277,12 +277,12 @@ bool try_wait_for_completion(struct completion *x)
+ if (!READ_ONCE(x->done))
+ return 0;
+
+- spin_lock_irqsave(&x->wait.lock, flags);
++ raw_spin_lock_irqsave(&x->wait.lock, flags);
+ if (!x->done)
+ ret = 0;
+ else
+ x->done--;
+- spin_unlock_irqrestore(&x->wait.lock, flags);
++ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(try_wait_for_completion);
+@@ -311,7 +311,7 @@ bool completion_done(struct completion *x)
+ * after it's acquired the lock.
+ */
+ smp_rmb();
+- spin_unlock_wait(&x->wait.lock);
++ raw_spin_unlock_wait(&x->wait.lock);
+ return true;
+ }
+ EXPORT_SYMBOL(completion_done);
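The conversion above puts completions on a raw spinlock plus a simple waitqueue, so complete() remains callable from contexts that must not take a sleeping lock on RT. A hedged pthread analogue of the complete()/do_wait_for_common() pairing, with a condition variable standing in for the swait queue:

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;   /* stands in for the raw spinlock */
        pthread_cond_t  wait;   /* stands in for the swait queue */
        unsigned int    done;
    };

    static void complete(struct completion *x)
    {
        pthread_mutex_lock(&x->lock);
        x->done++;
        pthread_cond_signal(&x->wait);              /* swake_up_locked() */
        pthread_mutex_unlock(&x->lock);
    }

    static void wait_for_completion(struct completion *x)
    {
        pthread_mutex_lock(&x->lock);
        while (!x->done)                            /* do_wait_for_common() */
            pthread_cond_wait(&x->wait, &x->lock);  /* drops + retakes lock */
        x->done--;
        pthread_mutex_unlock(&x->lock);
    }

    static void *worker(void *arg)
    {
        complete(arg);
        return NULL;
    }

    int main(void)
    {
        struct completion c = { PTHREAD_MUTEX_INITIALIZER,
                                PTHREAD_COND_INITIALIZER, 0 };
        pthread_t t;

        pthread_create(&t, NULL, worker, &c);
        wait_for_completion(&c);
        puts("completed");
        pthread_join(t, NULL);
        return 0;
    }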
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 44817c640e99..55aafcff5810 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_features =
+ * Number of tasks to iterate in a single balance run.
+ * Limited because this is done with IRQs disabled.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ const_debug unsigned int sysctl_sched_nr_migrate = 32;
++#else
++const_debug unsigned int sysctl_sched_nr_migrate = 8;
++#endif
+
+ /*
+ * period over which we average the RT time consumption, measured
+@@ -345,6 +349,7 @@ static void init_rq_hrtick(struct rq *rq)
+
+ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rq->hrtick_timer.function = hrtick;
++ rq->hrtick_timer.irqsafe = 1;
+ }
+ #else /* CONFIG_SCHED_HRTICK */
+ static inline void hrtick_clear(struct rq *rq)
+@@ -449,7 +454,7 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+ head->lastp = &node->next;
+ }
+
+-void wake_up_q(struct wake_q_head *head)
++void __wake_up_q(struct wake_q_head *head, bool sleeper)
+ {
+ struct wake_q_node *node = head->first;
+
+@@ -466,7 +471,10 @@ void wake_up_q(struct wake_q_head *head)
+ * wake_up_process() implies a wmb() to pair with the queueing
+ * in wake_q_add() so as not to miss wakeups.
+ */
+- wake_up_process(task);
++ if (sleeper)
++ wake_up_lock_sleeper(task);
++ else
++ wake_up_process(task);
+ put_task_struct(task);
+ }
+ }
+@@ -502,6 +510,38 @@ void resched_curr(struct rq *rq)
+ trace_sched_wake_idle_without_ipi(cpu);
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++void resched_curr_lazy(struct rq *rq)
++{
++ struct task_struct *curr = rq->curr;
++ int cpu;
++
++ if (!sched_feat(PREEMPT_LAZY)) {
++ resched_curr(rq);
++ return;
++ }
++
++ lockdep_assert_held(&rq->lock);
++
++ if (test_tsk_need_resched(curr))
++ return;
++
++ if (test_tsk_need_resched_lazy(curr))
++ return;
++
++ set_tsk_need_resched_lazy(curr);
++
++ cpu = cpu_of(rq);
++ if (cpu == smp_processor_id())
++ return;
++
++ /* NEED_RESCHED_LAZY must be visible before we test polling */
++ smp_mb();
++ if (!tsk_is_polling(curr))
++ smp_send_reschedule(cpu);
++}
++#endif
++
+ void resched_cpu(int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
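resched_curr_lazy() above sets a second, weaker need-resched flag and only IPIs remote CPUs; a genuine RT wakeup still goes through resched_curr() and preempts immediately. A compressed userspace model of that early-out ladder (flag values hypothetical):

    #include <stdio.h>

    #define NEED_RESCHED      0x1
    #define NEED_RESCHED_LAZY 0x2

    static unsigned int curr_flags;

    static void ipi(int cpu) { printf("IPI -> cpu%d\n", cpu); }

    /* Set the weaker lazy flag; only kick a remote CPU, and only when no
     * reschedule of either kind is already pending. */
    static void resched_lazy(int cpu, int this_cpu)
    {
        if (curr_flags & (NEED_RESCHED | NEED_RESCHED_LAZY))
            return;                 /* already pending: nothing to do */
        curr_flags |= NEED_RESCHED_LAZY;
        if (cpu != this_cpu)
            ipi(cpu);               /* the local CPU notices on its own */
    }

    int main(void)
    {
        resched_lazy(1, 0);         /* remote: sets the flag, sends an IPI */
        resched_lazy(1, 0);         /* flag already set: no second IPI */
        return 0;
    }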
+@@ -525,11 +565,14 @@ void resched_cpu(int cpu)
+ */
+ int get_nohz_timer_target(void)
+ {
+- int i, cpu = smp_processor_id();
++ int i, cpu;
+ struct sched_domain *sd;
+
++ preempt_disable_rt();
++ cpu = smp_processor_id();
++
+ if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
+- return cpu;
++ goto preempt_en_rt;
+
+ rcu_read_lock();
+ for_each_domain(cpu, sd) {
+@@ -548,6 +591,8 @@ int get_nohz_timer_target(void)
+ cpu = housekeeping_any_cpu();
+ unlock:
+ rcu_read_unlock();
++preempt_en_rt:
++ preempt_enable_rt();
+ return cpu;
+ }
+ /*
+@@ -1089,6 +1134,11 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+
+ lockdep_assert_held(&p->pi_lock);
+
++ if (__migrate_disabled(p)) {
++ cpumask_copy(&p->cpus_allowed, new_mask);
++ return;
++ }
++
+ queued = task_on_rq_queued(p);
+ running = task_current(rq, p);
+
+@@ -1111,6 +1161,84 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ enqueue_task(rq, p, ENQUEUE_RESTORE);
+ }
+
++static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
++static DEFINE_MUTEX(sched_down_mutex);
++static cpumask_t sched_down_cpumask;
++
++void tell_sched_cpu_down_begin(int cpu)
++{
++ mutex_lock(&sched_down_mutex);
++ cpumask_set_cpu(cpu, &sched_down_cpumask);
++ mutex_unlock(&sched_down_mutex);
++}
++
++void tell_sched_cpu_down_done(int cpu)
++{
++ mutex_lock(&sched_down_mutex);
++ cpumask_clear_cpu(cpu, &sched_down_cpumask);
++ mutex_unlock(&sched_down_mutex);
++}
++
++/**
++ * migrate_me - try to move the current task off this cpu
++ *
++ * Used by the pin_current_cpu() code to try to get tasks
++ * to move off the current CPU as it is going down.
++ * It will only move the task if the task isn't pinned to
++ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
++ * and the task has to be in a RUNNING state. Otherwise the
++ * movement of the task will wake it up (change its state
++ * to running) when the task did not expect it.
++ *
++ * Returns 1 if it succeeded in moving the current task
++ * 0 otherwise.
++ */
++int migrate_me(void)
++{
++ struct task_struct *p = current;
++ struct migration_arg arg;
++ struct cpumask *cpumask;
++ struct cpumask *mask;
++ unsigned int dest_cpu;
++ struct rq_flags rf;
++ struct rq *rq;
++
++ /*
++ * We cannot migrate tasks bound to a CPU or tasks that are
++ * not running. Moving such a task would wake it up.
++ */
++ if (p->flags & PF_NO_SETAFFINITY || p->state)
++ return 0;
++
++ mutex_lock(&sched_down_mutex);
++ rq = task_rq_lock(p, &rf);
++
++ cpumask = this_cpu_ptr(&sched_cpumasks);
++ mask = &p->cpus_allowed;
++
++ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
++
++ if (!cpumask_weight(cpumask)) {
++ /* It's only on this CPU? */
++ task_rq_unlock(rq, p, &rf);
++ mutex_unlock(&sched_down_mutex);
++ return 0;
++ }
++
++ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
++
++ arg.task = p;
++ arg.dest_cpu = dest_cpu;
++
++ task_rq_unlock(rq, p, &rf);
++
++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++ tlb_migrate_finish(p->mm);
++ mutex_unlock(&sched_down_mutex);
++
++ return 1;
++}
++
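migrate_me() reduces to one mask computation: the candidate CPUs are the task's affinity minus the CPUs marked as going down, and the destination is any active CPU left in that set. A bitmask sketch of the selection, with eight hypothetical CPUs and __builtin_ctz() (GCC/Clang) playing cpumask_any_and():

    #include <stdio.h>

    int main(void)
    {
        unsigned int cpus_allowed    = 0x0f; /* task may run on CPUs 0-3 */
        unsigned int sched_down_mask = 0x01; /* CPU 0 is going down */
        unsigned int candidates = cpus_allowed & ~sched_down_mask;

        if (!candidates) {
            puts("pinned to the dying CPU: cannot migrate");
            return 0;
        }
        /* cpumask_any_and(): take the lowest set bit as the destination. */
        printf("migrate current task to CPU %d\n", __builtin_ctz(candidates));
        return 0;
    }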
+ /*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+@@ -1168,7 +1296,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+ }
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+- if (cpumask_test_cpu(task_cpu(p), new_mask))
++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ goto out;
+
+ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+@@ -1355,6 +1483,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
+ return ret;
+ }
+
++static bool check_task_state(struct task_struct *p, long match_state)
++{
++ bool match = false;
++
++ raw_spin_lock_irq(&p->pi_lock);
++ if (p->state == match_state || p->saved_state == match_state)
++ match = true;
++ raw_spin_unlock_irq(&p->pi_lock);
++
++ return match;
++}
++
+ /*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+@@ -1399,7 +1539,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+ * is actually now running somewhere else!
+ */
+ while (task_running(rq, p)) {
+- if (match_state && unlikely(p->state != match_state))
++ if (match_state && !check_task_state(p, match_state))
+ return 0;
+ cpu_relax();
+ }
+@@ -1414,7 +1554,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+ running = task_running(rq, p);
+ queued = task_on_rq_queued(p);
+ ncsw = 0;
+- if (!match_state || p->state == match_state)
++ if (!match_state || p->state == match_state ||
++ p->saved_state == match_state)
+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+ task_rq_unlock(rq, p, &rf);
+
+@@ -1670,10 +1811,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
+ {
+ activate_task(rq, p, en_flags);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+-
+- /* if a worker is waking up, notify workqueue */
+- if (p->flags & PF_WQ_WORKER)
+- wq_worker_waking_up(p, cpu_of(rq));
+ }
+
+ /*
+@@ -2008,8 +2145,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ */
+ smp_mb__before_spinlock();
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+- if (!(p->state & state))
++ if (!(p->state & state)) {
++ /*
++ * The task might be running due to a spinlock sleeper
++ * wakeup. Check the saved state and set it to running
++ * if the wakeup condition is true.
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER)) {
++ if (p->saved_state & state) {
++ p->saved_state = TASK_RUNNING;
++ success = 1;
++ }
++ }
+ goto out;
++ }
++
++ /*
++ * If this is a regular wakeup, then we can unconditionally
++ * clear the saved state of a "lock sleeper".
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER))
++ p->saved_state = TASK_RUNNING;
+
+ trace_sched_waking(p);
+
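This is the wake side of the saved_state trick: a normal wakeup that misses task->state still checks the stashed state and promotes it to TASK_RUNNING, while a WF_LOCK_SLEEPER wakeup leaves the stash alone. A minimal decision sketch (userspace, names hypothetical):

    #include <stdio.h>

    #define TASK_RUNNING        0x0
    #define TASK_INTERRUPTIBLE  0x1
    #define LOCK_SLEEP          0x100  /* hypothetical lock-sleep state */
    #define WF_LOCK_SLEEPER     0x08

    struct task { unsigned int state, saved_state; };

    /* Sketch of the saved_state handling in try_to_wake_up(). */
    static int wake(struct task *p, unsigned int state, int wake_flags)
    {
        if (!(p->state & state)) {
            /* Maybe it is lock-sleeping: check the stashed state too. */
            if (!(wake_flags & WF_LOCK_SLEEPER) && (p->saved_state & state)) {
                p->saved_state = TASK_RUNNING;
                return 1;   /* runs once the lock sleep finishes */
            }
            return 0;
        }
        /* A regular wakeup also cancels any stashed lock-sleeper state. */
        if (!(wake_flags & WF_LOCK_SLEEPER))
            p->saved_state = TASK_RUNNING;
        return 1;
    }

    int main(void)
    {
        struct task p = { .state = LOCK_SLEEP,
                          .saved_state = TASK_INTERRUPTIBLE };

        printf("woken=%d saved_state=%#x\n",
               wake(&p, TASK_INTERRUPTIBLE, 0), p.saved_state);
        return 0;
    }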
+@@ -2093,53 +2249,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ }
+
+ /**
+- * try_to_wake_up_local - try to wake up a local task with rq lock held
+- * @p: the thread to be awakened
+- *
+- * Put @p on the run-queue if it's not already there. The caller must
+- * ensure that this_rq() is locked, @p is bound to this_rq() and not
+- * the current task.
+- */
+-static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
+-{
+- struct rq *rq = task_rq(p);
+-
+- if (WARN_ON_ONCE(rq != this_rq()) ||
+- WARN_ON_ONCE(p == current))
+- return;
+-
+- lockdep_assert_held(&rq->lock);
+-
+- if (!raw_spin_trylock(&p->pi_lock)) {
+- /*
+- * This is OK, because current is on_cpu, which avoids it being
+- * picked for load-balance and preemption/IRQs are still
+- * disabled avoiding further scheduler activity on it and we've
+- * not yet picked a replacement task.
+- */
+- lockdep_unpin_lock(&rq->lock, cookie);
+- raw_spin_unlock(&rq->lock);
+- raw_spin_lock(&p->pi_lock);
+- raw_spin_lock(&rq->lock);
+- lockdep_repin_lock(&rq->lock, cookie);
+- }
+-
+- if (!(p->state & TASK_NORMAL))
+- goto out;
+-
+- trace_sched_waking(p);
+-
+- if (!task_on_rq_queued(p))
+- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+-
+- ttwu_do_wakeup(rq, p, 0, cookie);
+- if (schedstat_enabled())
+- ttwu_stat(p, smp_processor_id(), 0);
+-out:
+- raw_spin_unlock(&p->pi_lock);
+-}
+-
+-/**
+ * wake_up_process - Wake up a specific process
+ * @p: The process to be woken up.
+ *
+@@ -2157,6 +2266,18 @@ int wake_up_process(struct task_struct *p)
+ }
+ EXPORT_SYMBOL(wake_up_process);
+
++/**
++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
++ * @p: The process to be woken up.
++ *
++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
++ * the nature of the wakeup.
++ */
++int wake_up_lock_sleeper(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
++}
++
+ int wake_up_state(struct task_struct *p, unsigned int state)
+ {
+ return try_to_wake_up(p, state, 0);
+@@ -2433,6 +2554,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+ p->on_cpu = 0;
+ #endif
+ init_task_preempt_count(p);
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
++ task_thread_info(p)->preempt_lazy_count = 0;
++#endif
+ #ifdef CONFIG_SMP
+ plist_node_init(&p->pushable_tasks, MAX_PRIO);
+ RB_CLEAR_NODE(&p->pushable_dl_tasks);
+@@ -2761,8 +2885,12 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+ finish_arch_post_lock_switch();
+
+ fire_sched_in_preempt_notifiers(current);
++ /*
++ * We use mmdrop_delayed() here so we don't have to do the
++ * full __mmdrop() when we are the last user.
++ */
+ if (mm)
+- mmdrop(mm);
++ mmdrop_delayed(mm);
+ if (unlikely(prev_state == TASK_DEAD)) {
+ if (prev->sched_class->task_dead)
+ prev->sched_class->task_dead(prev);
+@@ -3237,6 +3365,77 @@ static inline void schedule_debug(struct task_struct *prev)
+ schedstat_inc(this_rq(), sched_count);
+ }
+
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
++
++void migrate_disable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic++;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ if (unlikely(p->migrate_disable_atomic)) {
++ tracing_off();
++ WARN_ON_ONCE(1);
++ }
++#endif
++
++ if (p->migrate_disable) {
++ p->migrate_disable++;
++ return;
++ }
++
++ preempt_disable();
++ preempt_lazy_disable();
++ pin_current_cpu();
++ p->migrate_disable = 1;
++ preempt_enable();
++}
++EXPORT_SYMBOL(migrate_disable);
++
++void migrate_enable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic--;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ if (unlikely(p->migrate_disable_atomic)) {
++ tracing_off();
++ WARN_ON_ONCE(1);
++ }
++#endif
++ WARN_ON_ONCE(p->migrate_disable <= 0);
++
++ if (p->migrate_disable > 1) {
++ p->migrate_disable--;
++ return;
++ }
++
++ preempt_disable();
++ /*
++ * Clearing migrate_disable causes tsk_cpus_allowed to
++ * show the task's original CPU affinity.
++ */
++ p->migrate_disable = 0;
++
++ unpin_current_cpu();
++ preempt_enable();
++ preempt_lazy_enable();
++}
++EXPORT_SYMBOL(migrate_enable);
++#endif
++
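migrate_disable()/migrate_enable() form a classic nestable toggle: only the 0-to-1 transition pins the CPU and only the 1-to-0 transition unpins it; inner calls just move the counter. A userspace sketch of that nesting discipline, with a thread-local counter standing in for p->migrate_disable:

    #include <stdio.h>

    static _Thread_local int migrate_disable_cnt;

    static void my_migrate_disable(void)
    {
        if (migrate_disable_cnt++)
            return;                 /* already disabled: just nest */
        puts("pin current CPU");
    }

    static void my_migrate_enable(void)
    {
        if (--migrate_disable_cnt)
            return;                 /* still nested */
        puts("unpin current CPU");
    }

    int main(void)
    {
        my_migrate_disable();
        my_migrate_disable();       /* nested call only bumps the counter */
        my_migrate_enable();
        my_migrate_enable();        /* outermost enable unpins */
        return 0;
    }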
+ /*
+ * Pick up the highest-prio task:
+ */
+@@ -3364,19 +3563,6 @@ static void __sched notrace __schedule(bool preempt)
+ } else {
+ deactivate_task(rq, prev, DEQUEUE_SLEEP);
+ prev->on_rq = 0;
+-
+- /*
+- * If a worker went to sleep, notify and ask workqueue
+- * whether it wants to wake up a task to maintain
+- * concurrency.
+- */
+- if (prev->flags & PF_WQ_WORKER) {
+- struct task_struct *to_wakeup;
+-
+- to_wakeup = wq_worker_sleeping(prev);
+- if (to_wakeup)
+- try_to_wake_up_local(to_wakeup, cookie);
+- }
+ }
+ switch_count = &prev->nvcsw;
+ }
+@@ -3386,6 +3572,7 @@ static void __sched notrace __schedule(bool preempt)
+
+ next = pick_next_task(rq, prev, cookie);
+ clear_tsk_need_resched(prev);
++ clear_tsk_need_resched_lazy(prev);
+ clear_preempt_need_resched();
+ rq->clock_skip_update = 0;
+
+@@ -3407,9 +3594,20 @@ STACK_FRAME_NON_STANDARD(__schedule); /* switch_to() */
+
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
+- if (!tsk->state || tsk_is_pi_blocked(tsk))
++ if (!tsk->state)
+ return;
+ /*
++ * If a worker went to sleep, notify and ask workqueue whether
++ * it wants to wake up a task to maintain concurrency.
++ */
++ if (tsk->flags & PF_WQ_WORKER)
++ wq_worker_sleeping(tsk);
++
++
++ if (tsk_is_pi_blocked(tsk))
++ return;
++
++ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+ */
+@@ -3417,6 +3615,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
+ blk_schedule_flush_plug(tsk);
+ }
+
++static void sched_update_worker(struct task_struct *tsk)
++{
++ if (tsk->flags & PF_WQ_WORKER)
++ wq_worker_running(tsk);
++}
++
+ asmlinkage __visible void __sched schedule(void)
+ {
+ struct task_struct *tsk = current;
+@@ -3427,6 +3631,7 @@ asmlinkage __visible void __sched schedule(void)
+ __schedule(false);
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
++ sched_update_worker(tsk);
+ }
+ EXPORT_SYMBOL(schedule);
+
+@@ -3490,6 +3695,30 @@ static void __sched notrace preempt_schedule_common(void)
+ } while (need_resched());
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++/*
++ * If TIF_NEED_RESCHED is set, we allow being scheduled away, since it is
++ * set by an RT task. Otherwise we try to avoid being scheduled out as long
++ * as the preempt_lazy_count counter is > 0.
++ */
++static __always_inline int preemptible_lazy(void)
++{
++ if (test_thread_flag(TIF_NEED_RESCHED))
++ return 1;
++ if (current_thread_info()->preempt_lazy_count)
++ return 0;
++ return 1;
++}
++
++#else
++
++static inline int preemptible_lazy(void)
++{
++ return 1;
++}
++
++#endif
++
+ #ifdef CONFIG_PREEMPT
+ /*
+ * this is the entry point to schedule() from in-kernel preemption
+@@ -3504,7 +3733,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
+ */
+ if (likely(!preemptible()))
+ return;
+-
++ if (!preemptible_lazy())
++ return;
+ preempt_schedule_common();
+ }
+ NOKPROBE_SYMBOL(preempt_schedule);
+@@ -3531,6 +3761,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+ if (likely(!preemptible()))
+ return;
+
++ if (!preemptible_lazy())
++ return;
++
+ do {
+ /*
+ * Because the function tracer can trace preempt_count_sub()
+@@ -3553,7 +3786,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+ * an infinite recursion.
+ */
+ prev_ctx = exception_enter();
++ /*
++ * The add/subtract must not be traced by the function
++ * tracer. But we still want to account for the
++ * preempt off latency tracer. Since the _notrace versions
++ * of add/subtract skip the accounting for the latency
++ * tracer, we must force it manually.
++ */
++ start_critical_timings();
+ __schedule(true);
++ stop_critical_timings();
+ exception_exit(prev_ctx);
+
+ preempt_latency_stop(1);
+@@ -4901,6 +5143,7 @@ int __cond_resched_lock(spinlock_t *lock)
+ }
+ EXPORT_SYMBOL(__cond_resched_lock);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int __sched __cond_resched_softirq(void)
+ {
+ BUG_ON(!in_softirq());
+@@ -4914,6 +5157,7 @@ int __sched __cond_resched_softirq(void)
+ return 0;
+ }
+ EXPORT_SYMBOL(__cond_resched_softirq);
++#endif
+
+ /**
+ * yield - yield the current processor to other threads.
+@@ -5283,7 +5527,9 @@ void init_idle(struct task_struct *idle, int cpu)
+
+ /* Set the preempt count _outside_ the spinlocks! */
+ init_idle_preempt_count(idle, cpu);
+-
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
++ task_thread_info(idle)->preempt_lazy_count = 0;
++#endif
+ /*
+ * The idle tasks have their own, simple scheduling class:
+ */
+@@ -5426,6 +5672,8 @@ void sched_setnuma(struct task_struct *p, int nid)
+ #endif /* CONFIG_NUMA_BALANCING */
+
+ #ifdef CONFIG_HOTPLUG_CPU
++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
++
+ /*
+ * Ensures that the idle task is using init_mm right before its cpu goes
+ * offline.
+@@ -5440,7 +5688,12 @@ void idle_task_exit(void)
+ switch_mm_irqs_off(mm, &init_mm, current);
+ finish_arch_post_lock_switch();
+ }
+- mmdrop(mm);
++ /*
++ * Defer the cleanup to a live CPU. On RT we can neither
++ * call mmdrop() nor mmdrop_delayed() from here.
++ */
++ per_cpu(idle_last_mm, smp_processor_id()) = mm;
++
+ }
+
+ /*
+@@ -7315,6 +7568,10 @@ int sched_cpu_dying(unsigned int cpu)
+ update_max_interval();
+ nohz_balance_exit_idle(cpu);
+ hrtick_clear(rq);
++ if (per_cpu(idle_last_mm, cpu)) {
++ mmdrop_delayed(per_cpu(idle_last_mm, cpu));
++ per_cpu(idle_last_mm, cpu) = NULL;
++ }
+ return 0;
+ }
+ #endif
+@@ -7566,7 +7823,7 @@ void __init sched_init(void)
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ static inline int preempt_count_equals(int preempt_offset)
+ {
+- int nested = preempt_count() + rcu_preempt_depth();
++ int nested = preempt_count() + sched_rcu_preempt_depth();
+
+ return (nested == preempt_offset);
+ }
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 1ce8867283dc..766da04b06a0 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -697,6 +697,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
+
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ timer->function = dl_task_timer;
++ timer->irqsafe = 1;
+ }
+
+ static
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 2a0a9995256d..48a9b6f57249 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -552,6 +552,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
+ P(rt_throttled);
+ PN(rt_time);
+ PN(rt_runtime);
++#ifdef CONFIG_SMP
++ P(rt_nr_migratory);
++#endif
+
+ #undef PN
+ #undef P
+@@ -947,6 +950,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+ #endif
+ P(policy);
+ P(prio);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ P(migrate_disable);
++#endif
++ P(nr_cpus_allowed);
+ #undef PN
+ #undef __PN
+ #undef P
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 8b3610c871f2..1145079af264 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3508,7 +3508,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+ ideal_runtime = sched_slice(cfs_rq, curr);
+ delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ if (delta_exec > ideal_runtime) {
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ /*
+ * The current task ran long enough, ensure it doesn't get
+ * re-elected due to buddy favours.
+@@ -3532,7 +3532,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+ return;
+
+ if (delta > ideal_runtime)
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ }
+
+ static void
+@@ -3677,7 +3677,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+ * validating it and just reschedule.
+ */
+ if (queued) {
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ return;
+ }
+ /*
+@@ -3859,7 +3859,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
+ * hierarchy can be throttled
+ */
+ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ }
+
+ static __always_inline
+@@ -4487,7 +4487,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
+
+ if (delta < 0) {
+ if (rq->curr == p)
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ return;
+ }
+ hrtick_start(rq, delta);
+@@ -5676,7 +5676,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+ return;
+
+ preempt:
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ /*
+ * Only set the backward buddy when the current task is still
+ * on the rq. This can happen when a wakeup gets interleaved
+@@ -8402,7 +8402,7 @@ static void task_fork_fair(struct task_struct *p)
+ * 'current' within the tree based on its new key value.
+ */
+ swap(curr->vruntime, se->vruntime);
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ }
+
+ se->vruntime -= cfs_rq->min_vruntime;
+@@ -8426,7 +8426,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
+ */
+ if (rq->curr == p) {
+ if (p->prio > oldprio)
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ } else
+ check_preempt_curr(rq, p, 0);
+ }
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index 69631fa46c2f..6d28fcd08872 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -45,11 +45,19 @@ SCHED_FEAT(LB_BIAS, true)
+ */
+ SCHED_FEAT(NONTASK_CAPACITY, true)
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++SCHED_FEAT(TTWU_QUEUE, false)
++# ifdef CONFIG_PREEMPT_LAZY
++SCHED_FEAT(PREEMPT_LAZY, true)
++# endif
++#else
++
+ /*
+ * Queue remote wakeups on the target CPU and process them
+ * using the scheduler IPI. Reduces rq->lock contention/bounces.
+ */
+ SCHED_FEAT(TTWU_QUEUE, true)
++#endif
+
+ #ifdef HAVE_RT_PUSH_IPI
+ /*
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index d5690b722691..731cd0e98c15 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
+
+ hrtimer_init(&rt_b->rt_period_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rt_b->rt_period_timer.irqsafe = 1;
+ rt_b->rt_period_timer.function = sched_rt_period_timer;
+ }
+
+@@ -101,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
+ rt_rq->push_cpu = nr_cpu_ids;
+ raw_spin_lock_init(&rt_rq->push_lock);
+ init_irq_work(&rt_rq->push_work, push_irq_work_func);
++ rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ;
+ #endif
+ #endif /* CONFIG_SMP */
+ /* We start in dequeued state, because no RT tasks are queued */
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index c64fc5114004..af58f9b3ece4 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1138,6 +1138,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+ #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
+ #define WF_FORK 0x02 /* child wakeup after fork */
+ #define WF_MIGRATED 0x4 /* internal use, task got migrated */
++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
+
+ /*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+@@ -1316,6 +1317,15 @@ extern void init_sched_fair_class(void);
+ extern void resched_curr(struct rq *rq);
+ extern void resched_cpu(int cpu);
+
++#ifdef CONFIG_PREEMPT_LAZY
++extern void resched_curr_lazy(struct rq *rq);
++#else
++static inline void resched_curr_lazy(struct rq *rq)
++{
++ resched_curr(rq);
++}
++#endif
++
+ extern struct rt_bandwidth def_rt_bandwidth;
+ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+
+diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
+index 82f0dff90030..ef027ff3250a 100644
+--- a/kernel/sched/swait.c
++++ b/kernel/sched/swait.c
+@@ -1,5 +1,6 @@
+ #include <linux/sched.h>
+ #include <linux/swait.h>
++#include <linux/suspend.h>
+
+ void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
+ struct lock_class_key *key)
+@@ -29,6 +30,25 @@ void swake_up_locked(struct swait_queue_head *q)
+ }
+ EXPORT_SYMBOL(swake_up_locked);
+
++void swake_up_all_locked(struct swait_queue_head *q)
++{
++ struct swait_queue *curr;
++ int wakes = 0;
++
++ while (!list_empty(&q->task_list)) {
++
++ curr = list_first_entry(&q->task_list, typeof(*curr),
++ task_list);
++ wake_up_process(curr->task);
++ list_del_init(&curr->task_list);
++ wakes++;
++ }
++ if (pm_in_action)
++ return;
++ WARN(wakes > 2, "complete_all() with %d waiters\n", wakes);
++}
++EXPORT_SYMBOL(swake_up_all_locked);
++
+ void swake_up(struct swait_queue_head *q)
+ {
+ unsigned long flags;
+@@ -54,6 +74,7 @@ void swake_up_all(struct swait_queue_head *q)
+ if (!swait_active(q))
+ return;
+
++ WARN_ON(irqs_disabled());
+ raw_spin_lock_irq(&q->lock);
+ list_splice_init(&q->task_list, &tmp);
+ while (!list_empty(&tmp)) {
+diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c
+new file mode 100644
+index 000000000000..1950f40ca725
+--- /dev/null
++++ b/kernel/sched/swork.c
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
++ *
++ * Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks from
++ * irq context. The callbacks are executed in kthread context.
++ */
++
++#include <linux/swait.h>
++#include <linux/swork.h>
++#include <linux/kthread.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/export.h>
++
++#define SWORK_EVENT_PENDING (1 << 0)
++
++static DEFINE_MUTEX(worker_mutex);
++static struct sworker *glob_worker;
++
++struct sworker {
++ struct list_head events;
++ struct swait_queue_head wq;
++
++ raw_spinlock_t lock;
++
++ struct task_struct *task;
++ int refs;
++};
++
++static bool swork_readable(struct sworker *worker)
++{
++ bool r;
++
++ if (kthread_should_stop())
++ return true;
++
++ raw_spin_lock_irq(&worker->lock);
++ r = !list_empty(&worker->events);
++ raw_spin_unlock_irq(&worker->lock);
++
++ return r;
++}
++
++static int swork_kthread(void *arg)
++{
++ struct sworker *worker = arg;
++
++ for (;;) {
++ swait_event_interruptible(worker->wq,
++ swork_readable(worker));
++ if (kthread_should_stop())
++ break;
++
++ raw_spin_lock_irq(&worker->lock);
++ while (!list_empty(&worker->events)) {
++ struct swork_event *sev;
++
++ sev = list_first_entry(&worker->events,
++ struct swork_event, item);
++ list_del(&sev->item);
++ raw_spin_unlock_irq(&worker->lock);
++
++ WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
++ &sev->flags));
++ sev->func(sev);
++ raw_spin_lock_irq(&worker->lock);
++ }
++ raw_spin_unlock_irq(&worker->lock);
++ }
++ return 0;
++}
++
++static struct sworker *swork_create(void)
++{
++ struct sworker *worker;
++
++ worker = kzalloc(sizeof(*worker), GFP_KERNEL);
++ if (!worker)
++ return ERR_PTR(-ENOMEM);
++
++ INIT_LIST_HEAD(&worker->events);
++ raw_spin_lock_init(&worker->lock);
++ init_swait_queue_head(&worker->wq);
++
++ worker->task = kthread_run(swork_kthread, worker, "kswork");
++ if (IS_ERR(worker->task)) {
++ kfree(worker);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ return worker;
++}
++
++static void swork_destroy(struct sworker *worker)
++{
++ kthread_stop(worker->task);
++
++ WARN_ON(!list_empty(&worker->events));
++ kfree(worker);
++}
++
++/**
++ * swork_queue - queue swork
++ *
++ * Returns %false if @sev was already on a queue, %true otherwise.
++ *
++ * The work is queued and processed on a random CPU
++ */
++bool swork_queue(struct swork_event *sev)
++{
++ unsigned long flags;
++
++ if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
++ return false;
++
++ raw_spin_lock_irqsave(&glob_worker->lock, flags);
++ list_add_tail(&sev->item, &glob_worker->events);
++ raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
++
++ swake_up(&glob_worker->wq);
++ return true;
++}
++EXPORT_SYMBOL_GPL(swork_queue);
++
++/**
++ * swork_get - get an instance of the sworker
++ *
++ * Returns a negative error code if the initialization of the worker did
++ * not work, %0 otherwise.
++ *
++ */
++int swork_get(void)
++{
++ struct sworker *worker;
++
++ mutex_lock(&worker_mutex);
++ if (!glob_worker) {
++ worker = swork_create();
++ if (IS_ERR(worker)) {
++ mutex_unlock(&worker_mutex);
++ return -ENOMEM;
++ }
++
++ glob_worker = worker;
++ }
++
++ glob_worker->refs++;
++ mutex_unlock(&worker_mutex);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(swork_get);
++
++/**
++ * swork_put - puts an instance of the sworker
++ *
++ * Drops a reference to the sworker and destroys the kthread once the last
++ * reference is gone. It must not be called until all queued events have
++ * been completed.
++ */
++void swork_put(void)
++{
++ mutex_lock(&worker_mutex);
++
++ glob_worker->refs--;
++ if (glob_worker->refs > 0)
++ goto out;
++
++ swork_destroy(glob_worker);
++ glob_worker = NULL;
++out:
++ mutex_unlock(&worker_mutex);
++}
++EXPORT_SYMBOL_GPL(swork_put);
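For orientation, a hedged usage sketch of the API this new file exports. It builds only in a tree carrying this patch; the field-by-field event initialization merely mirrors the struct swork_event members (item/flags/func) visible above, and the companion swork.h may well provide a proper initializer instead:

    #include <linux/module.h>
    #include <linux/swork.h>

    static struct swork_event my_event;

    static void my_event_fn(struct swork_event *sev)
    {
        pr_info("ran in kswork kthread context\n");
    }

    static int __init my_init(void)
    {
        int ret = swork_get();          /* create or reference the worker */

        if (ret)
            return ret;
        INIT_LIST_HEAD(&my_event.item); /* assumed field-by-field init */
        my_event.flags = 0;
        my_event.func = my_event_fn;
        swork_queue(&my_event);         /* safe from irq context */
        return 0;
    }

    static void __exit my_exit(void)
    {
        swork_put();                    /* drop the reference */
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");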
+diff --git a/kernel/signal.c b/kernel/signal.c
+index af21afc00d08..7ead97a43298 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -14,6 +14,7 @@
+ #include <linux/export.h>
+ #include <linux/init.h>
+ #include <linux/sched.h>
++#include <linux/sched/rt.h>
+ #include <linux/fs.h>
+ #include <linux/tty.h>
+ #include <linux/binfmts.h>
+@@ -352,13 +353,30 @@ static bool task_participate_group_stop(struct task_struct *task)
+ return false;
+ }
+
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++ struct sigqueue *q = t->sigqueue_cache;
++
++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
++ return NULL;
++ return q;
++}
++
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
++ return 0;
++ return 1;
++}
++
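get_task_cache()/put_task_cache() are a lock-free one-slot cache: cmpxchg() swaps the slot against the expected value, so racing users fail harmlessly and at most one sigqueue is ever parked per task. The same idiom in portable C11 atomics (userspace sketch):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static _Atomic(void *) cache;   /* the single-slot cache */

    /* Take the cached object, or NULL if the slot is empty / we lost a race. */
    static void *cache_get(void)
    {
        void *q = atomic_load(&cache);

        if (!q || !atomic_compare_exchange_strong(&cache, &q, NULL))
            return NULL;
        return q;
    }

    /* Park an object; returns 0 on success, 1 if the slot was occupied. */
    static int cache_put(void *q)
    {
        void *expected = NULL;

        return atomic_compare_exchange_strong(&cache, &expected, q) ? 0 : 1;
    }

    int main(void)
    {
        void *obj = malloc(16);

        printf("put: %d\n", cache_put(obj));    /* 0: parked */
        printf("get: %p\n", cache_get());       /* hands obj back */
        free(obj);
        return 0;
    }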
+ /*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ * appropriate lock must be held to stop the target task from exiting
+ */
+ static struct sigqueue *
+-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit, int fromslab)
+ {
+ struct sigqueue *q = NULL;
+ struct user_struct *user;
+@@ -375,7 +393,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+ if (override_rlimit ||
+ atomic_read(&user->sigpending) <=
+ task_rlimit(t, RLIMIT_SIGPENDING)) {
+- q = kmem_cache_alloc(sigqueue_cachep, flags);
++ if (!fromslab)
++ q = get_task_cache(t);
++ if (!q)
++ q = kmem_cache_alloc(sigqueue_cachep, flags);
+ } else {
+ print_dropped_signal(sig);
+ }
+@@ -392,6 +413,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+ return q;
+ }
+
++static struct sigqueue *
++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit)
++{
++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
++}
++
+ static void __sigqueue_free(struct sigqueue *q)
+ {
+ if (q->flags & SIGQUEUE_PREALLOC)
+@@ -401,6 +429,21 @@ static void __sigqueue_free(struct sigqueue *q)
+ kmem_cache_free(sigqueue_cachep, q);
+ }
+
++static void sigqueue_free_current(struct sigqueue *q)
++{
++ struct user_struct *up;
++
++ if (q->flags & SIGQUEUE_PREALLOC)
++ return;
++
++ up = q->user;
++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
++ atomic_dec(&up->sigpending);
++ free_uid(up);
++ } else
++ __sigqueue_free(q);
++}
++
+ void flush_sigqueue(struct sigpending *queue)
+ {
+ struct sigqueue *q;
+@@ -414,6 +457,21 @@ void flush_sigqueue(struct sigpending *queue)
+ }
+
+ /*
++ * Called from __exit_signal. Flush tsk->pending and
++ * tsk->sigqueue_cache
++ */
++void flush_task_sigqueue(struct task_struct *tsk)
++{
++ struct sigqueue *q;
++
++ flush_sigqueue(&tsk->pending);
++
++ q = get_task_cache(tsk);
++ if (q)
++ kmem_cache_free(sigqueue_cachep, q);
++}
++
++/*
+ * Flush all pending signals for this kthread.
+ */
+ void flush_signals(struct task_struct *t)
+@@ -525,7 +583,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+ still_pending:
+ list_del_init(&first->list);
+ copy_siginfo(info, &first->info);
+- __sigqueue_free(first);
++ sigqueue_free_current(first);
+ } else {
+ /*
+ * Ok, it wasn't in the queue. This must be
+@@ -560,6 +618,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+ {
+ int signr;
+
++ WARN_ON_ONCE(tsk != current);
++
+ /* We only dequeue private signals from ourselves, we don't let
+ * signalfd steal them
+ */
+@@ -1156,8 +1216,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
+ * We don't want to have recursive SIGSEGV's etc, for example,
+ * that is why we also clear SIGNAL_UNKILLABLE.
+ */
+-int
+-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
++static int
++do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ {
+ unsigned long int flags;
+ int ret, blocked, ignored;
+@@ -1182,6 +1242,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ return ret;
+ }
+
++int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
++{
++/*
++ * On some archs, PREEMPT_RT has to delay sending a signal from a trap
++ * since it cannot enable preemption, and the signal code's spin_locks
++ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
++ * send the signal on exit of the trap.
++ */
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (in_atomic()) {
++ if (WARN_ON_ONCE(t != current))
++ return 0;
++ if (WARN_ON_ONCE(t->forced_info.si_signo))
++ return 0;
++
++ if (is_si_special(info)) {
++ WARN_ON_ONCE(info != SEND_SIG_PRIV);
++ t->forced_info.si_signo = sig;
++ t->forced_info.si_errno = 0;
++ t->forced_info.si_code = SI_KERNEL;
++ t->forced_info.si_pid = 0;
++ t->forced_info.si_uid = 0;
++ } else {
++ t->forced_info = *info;
++ }
++
++ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
++ return 0;
++ }
++#endif
++ return do_force_sig_info(sig, info, t);
++}
++
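The ARCH_RT_DELAYS_SIGNAL_SEND branch above is a stash-and-flag deferral: in atomic context the siginfo is copied into t->forced_info and TIF_NOTIFY_RESUME triggers delivery on the way back to user space. A userspace model of the handoff (struct and names hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct def_siginfo { int si_signo, si_errno, si_code; };

    struct task {
        struct def_siginfo forced_info; /* one stashed signal */
        int notify_resume;              /* stands in for TIF_NOTIFY_RESUME */
    };

    /* Atomic-context path: stash instead of delivering right away. */
    static void defer_signal(struct task *t, const struct def_siginfo *info)
    {
        t->forced_info = *info;
        t->notify_resume = 1;
    }

    /* Return-to-user path drains the stash. */
    static void exit_to_user(struct task *t)
    {
        if (t->notify_resume && t->forced_info.si_signo) {
            printf("delivering deferred signal %d\n",
                   t->forced_info.si_signo);
            memset(&t->forced_info, 0, sizeof(t->forced_info));
            t->notify_resume = 0;
        }
    }

    int main(void)
    {
        struct task t = { { 0, 0, 0 }, 0 };
        struct def_siginfo si = { 11, 0, 0 };

        defer_signal(&t, &si);
        exit_to_user(&t);
        return 0;
    }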
+ /*
+ * Nuke all other threads in the group.
+ */
+@@ -1216,12 +1309,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
+ * Disable interrupts early to avoid deadlocks.
+ * See rcu_read_unlock() comment header for details.
+ */
+- local_irq_save(*flags);
++ local_irq_save_nort(*flags);
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
+ if (unlikely(sighand == NULL)) {
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_irq_restore_nort(*flags);
+ break;
+ }
+ /*
+@@ -1242,7 +1335,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
+ }
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_irq_restore_nort(*flags);
+ }
+
+ return sighand;
+@@ -1485,7 +1578,8 @@ EXPORT_SYMBOL(kill_pid);
+ */
+ struct sigqueue *sigqueue_alloc(void)
+ {
+- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
++ /* Preallocated sigqueue objects always from the slabcache ! */
++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
+
+ if (q)
+ q->flags |= SIGQUEUE_PREALLOC;
+@@ -1846,15 +1940,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+ if (gstop_done && ptrace_reparented(current))
+ do_notify_parent_cldstop(current, false, why);
+
+- /*
+- * Don't want to allow preemption here, because
+- * sys_ptrace() needs this task to be inactive.
+- *
+- * XXX: implement read_unlock_no_resched().
+- */
+- preempt_disable();
+ read_unlock(&tasklist_lock);
+- preempt_enable_no_resched();
+ freezable_schedule();
+ } else {
+ /*
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 17caf4b63342..a602b7152de7 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -21,10 +21,12 @@
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
+ #include <linux/rcupdate.h>
++#include <linux/delay.h>
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/smpboot.h>
+ #include <linux/tick.h>
++#include <linux/locallock.h>
+ #include <linux/irq.h>
+
+ #define CREATE_TRACE_POINTS
+@@ -56,12 +58,108 @@ EXPORT_SYMBOL(irq_stat);
+ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
+
+ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
++#ifdef CONFIG_PREEMPT_RT_FULL
++#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
++DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
++#endif
+
+ const char * const softirq_to_name[NR_SOFTIRQS] = {
+ "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+
++#ifdef CONFIG_NO_HZ_COMMON
++# ifdef CONFIG_PREEMPT_RT_FULL
++
++struct softirq_runner {
++ struct task_struct *runner[NR_SOFTIRQS];
++};
++
++static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
++
++static inline void softirq_set_runner(unsigned int sirq)
++{
++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
++
++ sr->runner[sirq] = current;
++}
++
++static inline void softirq_clr_runner(unsigned int sirq)
++{
++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
++
++ sr->runner[sirq] = NULL;
++}
++
++/*
++ * On preempt-rt a softirq running context might be blocked on a
++ * lock. There might be no other runnable task on this CPU because the
++ * lock owner runs on some other CPU. So we have to go into idle with
++ * the pending bit set. Therefore we need to check this; otherwise we
++ * warn about false positives, which confuses users and defeats the
++ * whole purpose of this test.
++ *
++ * This code is called with interrupts disabled.
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
++ u32 warnpending;
++ int i;
++
++ if (rate_limit >= 10)
++ return;
++
++ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
++ for (i = 0; i < NR_SOFTIRQS; i++) {
++ struct task_struct *tsk = sr->runner[i];
++
++ /*
++ * The wakeup code in rtmutex.c wakes up the task
++ * _before_ it sets pi_blocked_on to NULL under
++ * tsk->pi_lock. So we need to check for both: state
++ * and pi_blocked_on.
++ */
++ if (tsk) {
++ raw_spin_lock(&tsk->pi_lock);
++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
++ /* Clear all bits pending in that task */
++ warnpending &= ~(tsk->softirqs_raised);
++ warnpending &= ~(1 << i);
++ }
++ raw_spin_unlock(&tsk->pi_lock);
++ }
++ }
++
++ if (warnpending) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ warnpending);
++ rate_limit++;
++ }
++}
++# else
++/*
++ * On !PREEMPT_RT we just printk rate-limited:
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++
++ if (rate_limit < 10 &&
++ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ local_softirq_pending());
++ rate_limit++;
++ }
++}
++# endif
++
++#else /* !CONFIG_NO_HZ_COMMON */
++static inline void softirq_set_runner(unsigned int sirq) { }
++static inline void softirq_clr_runner(unsigned int sirq) { }
++#endif
++
+ /*
+ * we cannot loop indefinitely here to avoid userspace starvation,
+ * but we also don't want to introduce a worst case 1/HZ latency
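
The bookkeeping in softirq_check_pending_idle() above is easy to misread: a pending bit is reported only if no blocked-or-runnable softirq runner accounts for it. A stand-alone model of that loop (plain C, data layout hypothetical) shows the effect:

#include <stdio.h>

#define NR_SOFTIRQS 10

struct runner { int active; unsigned raised; };

/* Model of the warnpending loop: clear every pending bit that some
 * blocked-or-runnable softirq runner already accounts for. */
static unsigned unaccounted(unsigned pending, struct runner r[NR_SOFTIRQS])
{
    unsigned warn = pending;
    int i;

    for (i = 0; i < NR_SOFTIRQS; i++) {
        if (r[i].active) {
            warn &= ~r[i].raised;
            warn &= ~(1u << i);
        }
    }
    return warn;
}

int main(void)
{
    struct runner r[NR_SOFTIRQS] = { 0 };

    r[1].active = 1;            /* TIMER softirq runner is blocked */
    r[1].raised = 1u << 1;
    /* bit 1 is accounted for, bit 3 is not: prints 0x8 */
    printf("%#x\n", unaccounted((1u << 1) | (1u << 3), r));
    return 0;
}
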
+@@ -77,6 +175,79 @@ static void wakeup_softirqd(void)
+ wake_up_process(tsk);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void wakeup_timer_softirqd(void)
++{
++ /* Interrupts are disabled: no need to stop preemption */
++ struct task_struct *tsk = __this_cpu_read(ktimer_softirqd);
++
++ if (tsk && tsk->state != TASK_RUNNING)
++ wake_up_process(tsk);
++}
++#endif
++
++static void handle_softirq(unsigned int vec_nr)
++{
++ struct softirq_action *h = softirq_vec + vec_nr;
++ int prev_count;
++
++ prev_count = preempt_count();
++
++ kstat_incr_softirqs_this_cpu(vec_nr);
++
++ trace_softirq_entry(vec_nr);
++ h->action(h);
++ trace_softirq_exit(vec_nr);
++ if (unlikely(prev_count != preempt_count())) {
++ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
++ vec_nr, softirq_to_name[vec_nr], h->action,
++ prev_count, preempt_count());
++ preempt_count_set(prev_count);
++ }
++}
++
++#ifndef CONFIG_PREEMPT_RT_FULL
++static inline int ksoftirqd_softirq_pending(void)
++{
++ return local_softirq_pending();
++}
++
++static void handle_pending_softirqs(u32 pending)
++{
++ struct softirq_action *h = softirq_vec;
++ int softirq_bit;
++
++ local_irq_enable();
++
++ h = softirq_vec;
++
++ while ((softirq_bit = ffs(pending))) {
++ unsigned int vec_nr;
++
++ h += softirq_bit - 1;
++ vec_nr = h - softirq_vec;
++ handle_softirq(vec_nr);
++
++ h++;
++ pending >>= softirq_bit;
++ }
++
++ rcu_bh_qs();
++ local_irq_disable();
++}
++
++static void run_ksoftirqd(unsigned int cpu)
++{
++ local_irq_disable();
++ if (ksoftirqd_softirq_pending()) {
++ __do_softirq();
++ local_irq_enable();
++ cond_resched_rcu_qs();
++ return;
++ }
++ local_irq_enable();
++}
++
+ /*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -232,10 +403,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
+ unsigned long old_flags = current->flags;
+ int max_restart = MAX_SOFTIRQ_RESTART;
+- struct softirq_action *h;
+ bool in_hardirq;
+ __u32 pending;
+- int softirq_bit;
+
+ /*
+ * Mask out PF_MEMALLOC s current task context is borrowed for the
+@@ -254,36 +423,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
+
+- local_irq_enable();
+-
+- h = softirq_vec;
+-
+- while ((softirq_bit = ffs(pending))) {
+- unsigned int vec_nr;
+- int prev_count;
+-
+- h += softirq_bit - 1;
+-
+- vec_nr = h - softirq_vec;
+- prev_count = preempt_count();
+-
+- kstat_incr_softirqs_this_cpu(vec_nr);
+-
+- trace_softirq_entry(vec_nr);
+- h->action(h);
+- trace_softirq_exit(vec_nr);
+- if (unlikely(prev_count != preempt_count())) {
+- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+- vec_nr, softirq_to_name[vec_nr], h->action,
+- prev_count, preempt_count());
+- preempt_count_set(prev_count);
+- }
+- h++;
+- pending >>= softirq_bit;
+- }
+-
+- rcu_bh_qs();
+- local_irq_disable();
++ handle_pending_softirqs(pending);
+
+ pending = local_softirq_pending();
+ if (pending) {
+@@ -320,6 +460,310 @@ asmlinkage __visible void do_softirq(void)
+ }
+
+ /*
++ * This function must run with irqs disabled!
++ */
++void raise_softirq_irqoff(unsigned int nr)
++{
++ __raise_softirq_irqoff(nr);
++
++ /*
++ * If we're in an interrupt or softirq, we're done
++ * (this also catches softirq-disabled code). We will
++ * actually run the softirq once we return from
++ * the irq or softirq.
++ *
++ * Otherwise we wake up ksoftirqd to make sure we
++ * schedule the softirq soon.
++ */
++ if (!in_interrupt())
++ wakeup_softirqd();
++}
++
++void __raise_softirq_irqoff(unsigned int nr)
++{
++ trace_softirq_raise(nr);
++ or_softirq_pending(1UL << nr);
++}
++
++static inline void local_bh_disable_nort(void) { local_bh_disable(); }
++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
++static void ksoftirqd_set_sched_params(unsigned int cpu) { }
++
++#else /* !PREEMPT_RT_FULL */
++
++/*
++ * On RT we serialize softirq execution with a cpu local lock per softirq
++ */
++static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
++
++void __init softirq_early_init(void)
++{
++ int i;
++
++ for (i = 0; i < NR_SOFTIRQS; i++)
++ local_irq_lock_init(local_softirq_locks[i]);
++}
++
++static void lock_softirq(int which)
++{
++ local_lock(local_softirq_locks[which]);
++}
++
++static void unlock_softirq(int which)
++{
++ local_unlock(local_softirq_locks[which]);
++}
++
++static void do_single_softirq(int which)
++{
++ unsigned long old_flags = current->flags;
++
++ current->flags &= ~PF_MEMALLOC;
++ vtime_account_irq_enter(current);
++ current->flags |= PF_IN_SOFTIRQ;
++ lockdep_softirq_enter();
++ local_irq_enable();
++ handle_softirq(which);
++ local_irq_disable();
++ lockdep_softirq_exit();
++ current->flags &= ~PF_IN_SOFTIRQ;
++ vtime_account_irq_enter(current);
++ tsk_restore_flags(current, old_flags, PF_MEMALLOC);
++}
++
++/*
++ * Called with interrupts disabled. Process softirqs which were raised
++ * in current context (or on behalf of ksoftirqd).
++ */
++static void do_current_softirqs(void)
++{
++ while (current->softirqs_raised) {
++ int i = __ffs(current->softirqs_raised);
++ unsigned int pending, mask = (1U << i);
++
++ current->softirqs_raised &= ~mask;
++ local_irq_enable();
++
++ /*
++ * If the lock is contended, we boost the owner to
++ * process the softirq or leave the critical section
++ * now.
++ */
++ lock_softirq(i);
++ local_irq_disable();
++ softirq_set_runner(i);
++ /*
++		 * Check the local_softirq_pending() bits to see whether
++		 * we still need to process this or whether someone
++		 * else took care of it.
++ */
++ pending = local_softirq_pending();
++ if (pending & mask) {
++ set_softirq_pending(pending & ~mask);
++ do_single_softirq(i);
++ }
++ softirq_clr_runner(i);
++ WARN_ON(current->softirq_nestcnt != 1);
++ local_irq_enable();
++ unlock_softirq(i);
++ local_irq_disable();
++ }
++}
++
++void __local_bh_disable(void)
++{
++ if (++current->softirq_nestcnt == 1)
++ migrate_disable();
++}
++EXPORT_SYMBOL(__local_bh_disable);
++
++void __local_bh_enable(void)
++{
++ if (WARN_ON(current->softirq_nestcnt == 0))
++ return;
++
++ local_irq_disable();
++ if (current->softirq_nestcnt == 1 && current->softirqs_raised)
++ do_current_softirqs();
++ local_irq_enable();
++
++ if (--current->softirq_nestcnt == 0)
++ migrate_enable();
++}
++EXPORT_SYMBOL(__local_bh_enable);
++
++void _local_bh_enable(void)
++{
++ if (WARN_ON(current->softirq_nestcnt == 0))
++ return;
++ if (--current->softirq_nestcnt == 0)
++ migrate_enable();
++}
++EXPORT_SYMBOL(_local_bh_enable);
++
++int in_serving_softirq(void)
++{
++ return current->flags & PF_IN_SOFTIRQ;
++}
++EXPORT_SYMBOL(in_serving_softirq);
++
++/* Called with preemption disabled */
++static void run_ksoftirqd(unsigned int cpu)
++{
++ local_irq_disable();
++ current->softirq_nestcnt++;
++
++ do_current_softirqs();
++ current->softirq_nestcnt--;
++ local_irq_enable();
++ cond_resched_rcu_qs();
++}
++
++/*
++ * Called from netif_rx_ni(). Preemption enabled, but migration
++ * disabled. So the cpu can't go away under us.
++ */
++void thread_do_softirq(void)
++{
++ if (!in_serving_softirq() && current->softirqs_raised) {
++ current->softirq_nestcnt++;
++ do_current_softirqs();
++ current->softirq_nestcnt--;
++ }
++}
++
++static void do_raise_softirq_irqoff(unsigned int nr)
++{
++ unsigned int mask;
++
++ mask = 1UL << nr;
++
++ trace_softirq_raise(nr);
++ or_softirq_pending(mask);
++
++ /*
++ * If we are not in a hard interrupt and inside a bh disabled
++ * region, we simply raise the flag on current. local_bh_enable()
++ * will make sure that the softirq is executed. Otherwise we
++ * delegate it to ksoftirqd.
++ */
++ if (!in_irq() && current->softirq_nestcnt)
++ current->softirqs_raised |= mask;
++ else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
++ return;
++
++ if (mask & TIMER_SOFTIRQS)
++ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
++ else
++ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
++}
++
++static void wakeup_proper_softirq(unsigned int nr)
++{
++ if ((1UL << nr) & TIMER_SOFTIRQS)
++ wakeup_timer_softirqd();
++ else
++ wakeup_softirqd();
++}
++
++
++void __raise_softirq_irqoff(unsigned int nr)
++{
++ do_raise_softirq_irqoff(nr);
++ if (!in_irq() && !current->softirq_nestcnt)
++ wakeup_proper_softirq(nr);
++}
++
++/*
++ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd
++ */
++void __raise_softirq_irqoff_ksoft(unsigned int nr)
++{
++ unsigned int mask;
++
++ if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) ||
++ !__this_cpu_read(ktimer_softirqd)))
++ return;
++ mask = 1UL << nr;
++
++ trace_softirq_raise(nr);
++ or_softirq_pending(mask);
++ if (mask & TIMER_SOFTIRQS)
++ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
++ else
++ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
++ wakeup_proper_softirq(nr);
++}
++
++/*
++ * This function must run with irqs disabled!
++ */
++void raise_softirq_irqoff(unsigned int nr)
++{
++ do_raise_softirq_irqoff(nr);
++
++ /*
++	 * If we're in a hard interrupt we let the irq return code deal
++ * with the wakeup of ksoftirqd.
++ */
++ if (in_irq())
++ return;
++ /*
++ * If we are in thread context but outside of a bh disabled
++ * region, we need to wake ksoftirqd as well.
++ *
++ * CHECKME: Some of the places which do that could be wrapped
++ * into local_bh_disable/enable pairs. Though it's unclear
++ * whether this is worth the effort. To find those places just
++ * raise a WARN() if the condition is met.
++ */
++ if (!current->softirq_nestcnt)
++ wakeup_proper_softirq(nr);
++}
++
++static inline int ksoftirqd_softirq_pending(void)
++{
++ return current->softirqs_raised;
++}
++
++static inline void local_bh_disable_nort(void) { }
++static inline void _local_bh_enable_nort(void) { }
++
++static inline void ksoftirqd_set_sched_params(unsigned int cpu)
++{
++ /* Take over all but timer pending softirqs when starting */
++ local_irq_disable();
++ current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS;
++ local_irq_enable();
++}
++
++static inline void ktimer_softirqd_set_sched_params(unsigned int cpu)
++{
++ struct sched_param param = { .sched_priority = 1 };
++
++ sched_setscheduler(current, SCHED_FIFO, &param);
++
++ /* Take over timer pending softirqs when starting */
++ local_irq_disable();
++ current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS;
++ local_irq_enable();
++}
++
++static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu,
++ bool online)
++{
++ struct sched_param param = { .sched_priority = 0 };
++
++ sched_setscheduler(current, SCHED_NORMAL, &param);
++}
++
++static int ktimer_softirqd_should_run(unsigned int cpu)
++{
++ return current->softirqs_raised;
++}
++
++#endif /* PREEMPT_RT_FULL */
++/*
+ * Enter an interrupt context.
+ */
+ void irq_enter(void)
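
Taken together, do_raise_softirq_irqoff() and raise_softirq_irqoff() above make a three-way routing decision on RT. The stand-alone table below restates who ends up running a freshly raised softirq; it summarizes the code above and is not kernel API:

#include <stdio.h>

/* Truth table of the RT raise_softirq routing: who becomes
 * responsible for executing a freshly raised softirq. */
static const char *who_runs(int in_irq, int softirq_nestcnt)
{
    if (!in_irq && softirq_nestcnt)
        return "current task, when local_bh_enable() runs it";
    if (in_irq)
        return "softirq thread, woken at irq exit by invoke_softirq()";
    return "softirq thread, woken immediately";
}

int main(void)
{
    printf("bh-disabled region: %s\n", who_runs(0, 1));
    printf("hard interrupt:     %s\n", who_runs(1, 0));
    printf("plain thread:       %s\n", who_runs(0, 0));
    return 0;
}
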
+@@ -330,9 +774,9 @@ void irq_enter(void)
+ * Prevent raise_softirq from needlessly waking up ksoftirqd
+ * here, as softirq will be serviced on return from interrupt.
+ */
+- local_bh_disable();
++ local_bh_disable_nort();
+ tick_irq_enter();
+- _local_bh_enable();
++ _local_bh_enable_nort();
+ }
+
+ __irq_enter();
+@@ -340,6 +784,7 @@ void irq_enter(void)
+
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!force_irqthreads) {
+ #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+ /*
+@@ -359,6 +804,18 @@ static inline void invoke_softirq(void)
+ } else {
+ wakeup_softirqd();
+ }
++#else /* PREEMPT_RT_FULL */
++ unsigned long flags;
++
++ local_irq_save(flags);
++ if (__this_cpu_read(ksoftirqd) &&
++ __this_cpu_read(ksoftirqd)->softirqs_raised)
++ wakeup_softirqd();
++ if (__this_cpu_read(ktimer_softirqd) &&
++ __this_cpu_read(ktimer_softirqd)->softirqs_raised)
++ wakeup_timer_softirqd();
++ local_irq_restore(flags);
++#endif
+ }
+
+ static inline void tick_irq_exit(void)
+@@ -395,26 +852,6 @@ void irq_exit(void)
+ trace_hardirq_exit(); /* must be last! */
+ }
+
+-/*
+- * This function must run with irqs disabled!
+- */
+-inline void raise_softirq_irqoff(unsigned int nr)
+-{
+- __raise_softirq_irqoff(nr);
+-
+- /*
+- * If we're in an interrupt or softirq, we're done
+- * (this also catches softirq-disabled code). We will
+- * actually run the softirq once we return from
+- * the irq or softirq.
+- *
+- * Otherwise we wake up ksoftirqd to make sure we
+- * schedule the softirq soon.
+- */
+- if (!in_interrupt())
+- wakeup_softirqd();
+-}
+-
+ void raise_softirq(unsigned int nr)
+ {
+ unsigned long flags;
+@@ -424,12 +861,6 @@ void raise_softirq(unsigned int nr)
+ local_irq_restore(flags);
+ }
+
+-void __raise_softirq_irqoff(unsigned int nr)
+-{
+- trace_softirq_raise(nr);
+- or_softirq_pending(1UL << nr);
+-}
+-
+ void open_softirq(int nr, void (*action)(struct softirq_action *))
+ {
+ softirq_vec[nr].action = action;
+@@ -446,15 +877,45 @@ struct tasklet_head {
+ static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
+ static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
+
++static inline void
++__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
++{
++ if (tasklet_trylock(t)) {
++again:
++ /* We may have been preempted before tasklet_trylock
++ * and __tasklet_action may have already run.
++	 * So double-check the sched bit while the tasklet
++ * is locked before adding it to the list.
++ */
++ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
++ t->next = NULL;
++ *head->tail = t;
++ head->tail = &(t->next);
++ raise_softirq_irqoff(nr);
++ tasklet_unlock(t);
++ } else {
++		/* This is subtle. If we hit the corner case above,
++		 * it is possible that we get preempted right here,
++		 * and another task has successfully called
++		 * tasklet_schedule(), then this function, and
++		 * failed on the trylock. Thus we must be sure,
++		 * before releasing the tasklet lock, that the
++		 * SCHED bit is clear. Otherwise the tasklet
++		 * may get its SCHED bit set, but not be added
++		 * to the list.
++ */
++ if (!tasklet_tryunlock(t))
++ goto again;
++ }
++ }
++}
++
+ void __tasklet_schedule(struct tasklet_struct *t)
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+- t->next = NULL;
+- *__this_cpu_read(tasklet_vec.tail) = t;
+- __this_cpu_write(tasklet_vec.tail, &(t->next));
+- raise_softirq_irqoff(TASKLET_SOFTIRQ);
++ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(__tasklet_schedule);
+@@ -464,10 +925,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
+ unsigned long flags;
+
+ local_irq_save(flags);
+- t->next = NULL;
+- *__this_cpu_read(tasklet_hi_vec.tail) = t;
+- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+- raise_softirq_irqoff(HI_SOFTIRQ);
++ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule);
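
The tasklet_trylock()/tasklet_tryunlock() protocol the comments above lean on is, at bottom, a compare-and-swap that permits only the RUN -> 0 transition; a concurrent schedule or disable makes the unlock fail, forcing the caller back into its re-check loop. A minimal C11 model (bit values are hypothetical, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

#define RUN_BIT   0x1    /* models TASKLET_STATE_RUN */
#define SCHED_BIT 0x2    /* models TASKLET_STATE_SCHED */

/* Only the RUN -> 0 transition may succeed; if SCHED (or anything
 * else) got set meanwhile, the unlock fails and the caller loops. */
static int tryunlock(atomic_uint *state)
{
    unsigned int expected = RUN_BIT;

    return atomic_compare_exchange_strong(state, &expected, 0);
}

int main(void)
{
    atomic_uint state = RUN_BIT;

    printf("plain unlock: %d\n", tryunlock(&state));           /* 1 */
    atomic_store(&state, RUN_BIT | SCHED_BIT);
    printf("rescheduled meanwhile: %d\n", tryunlock(&state));  /* 0 */
    return 0;
}
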
+@@ -476,82 +934,122 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+ {
+ BUG_ON(!irqs_disabled());
+
+- t->next = __this_cpu_read(tasklet_hi_vec.head);
+- __this_cpu_write(tasklet_hi_vec.head, t);
+- __raise_softirq_irqoff(HI_SOFTIRQ);
++ __tasklet_hi_schedule(t);
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+-static void tasklet_action(struct softirq_action *a)
++void tasklet_enable(struct tasklet_struct *t)
+ {
+- struct tasklet_struct *list;
++ if (!atomic_dec_and_test(&t->count))
++ return;
++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
++ tasklet_schedule(t);
++}
++EXPORT_SYMBOL(tasklet_enable);
+
+- local_irq_disable();
+- list = __this_cpu_read(tasklet_vec.head);
+- __this_cpu_write(tasklet_vec.head, NULL);
+- __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
+- local_irq_enable();
++static void __tasklet_action(struct softirq_action *a,
++ struct tasklet_struct *list)
++{
++ int loops = 1000000;
+
+ while (list) {
+ struct tasklet_struct *t = list;
+
+ list = list->next;
+
+- if (tasklet_trylock(t)) {
+- if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+- &t->state))
+- BUG();
+- t->func(t->data);
+- tasklet_unlock(t);
+- continue;
+- }
+- tasklet_unlock(t);
++ /*
++		 * Should always succeed - after a tasklet got on the
++ * list (after getting the SCHED bit set from 0 to 1),
++ * nothing but the tasklet softirq it got queued to can
++ * lock it:
++ */
++ if (!tasklet_trylock(t)) {
++ WARN_ON(1);
++ continue;
+ }
+
+- local_irq_disable();
+ t->next = NULL;
+- *__this_cpu_read(tasklet_vec.tail) = t;
+- __this_cpu_write(tasklet_vec.tail, &(t->next));
+- __raise_softirq_irqoff(TASKLET_SOFTIRQ);
+- local_irq_enable();
++
++ /*
++ * If we cannot handle the tasklet because it's disabled,
++ * mark it as pending. tasklet_enable() will later
++ * re-schedule the tasklet.
++ */
++ if (unlikely(atomic_read(&t->count))) {
++out_disabled:
++ /* implicit unlock: */
++ wmb();
++ t->state = TASKLET_STATEF_PENDING;
++ continue;
++ }
++
++ /*
++		 * From this point on, the tasklet might be rescheduled
++		 * on another CPU, but it can only be added to another
++		 * CPU's tasklet list if we unlock the tasklet (which we
++		 * don't do yet).
++ */
++ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++ WARN_ON(1);
++
++again:
++ t->func(t->data);
++
++ /*
++ * Try to unlock the tasklet. We must use cmpxchg, because
++ * another CPU might have scheduled or disabled the tasklet.
++ * We only allow the STATE_RUN -> 0 transition here.
++ */
++ while (!tasklet_tryunlock(t)) {
++ /*
++ * If it got disabled meanwhile, bail out:
++ */
++ if (atomic_read(&t->count))
++ goto out_disabled;
++ /*
++ * If it got scheduled meanwhile, re-execute
++ * the tasklet function:
++ */
++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++ goto again;
++ if (!--loops) {
++ printk("hm, tasklet state: %08lx\n", t->state);
++ WARN_ON(1);
++ tasklet_unlock(t);
++ break;
++ }
++ }
+ }
+ }
+
++static void tasklet_action(struct softirq_action *a)
++{
++ struct tasklet_struct *list;
++
++ local_irq_disable();
++
++ list = __this_cpu_read(tasklet_vec.head);
++ __this_cpu_write(tasklet_vec.head, NULL);
++ __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
++
++ local_irq_enable();
++
++ __tasklet_action(a, list);
++}
++
+ static void tasklet_hi_action(struct softirq_action *a)
+ {
+ struct tasklet_struct *list;
+
+ local_irq_disable();
++
+ list = __this_cpu_read(tasklet_hi_vec.head);
+ __this_cpu_write(tasklet_hi_vec.head, NULL);
+ __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
++
+ local_irq_enable();
+
+- while (list) {
+- struct tasklet_struct *t = list;
+-
+- list = list->next;
+-
+- if (tasklet_trylock(t)) {
+- if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+- &t->state))
+- BUG();
+- t->func(t->data);
+- tasklet_unlock(t);
+- continue;
+- }
+- tasklet_unlock(t);
+- }
+-
+- local_irq_disable();
+- t->next = NULL;
+- *__this_cpu_read(tasklet_hi_vec.tail) = t;
+- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+- __raise_softirq_irqoff(HI_SOFTIRQ);
+- local_irq_enable();
+- }
++ __tasklet_action(a, list);
+ }
+
+ void tasklet_init(struct tasklet_struct *t,
+@@ -572,7 +1070,7 @@ void tasklet_kill(struct tasklet_struct *t)
+
+ while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ do {
+- yield();
++ msleep(1);
+ } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ }
+ tasklet_unlock_wait(t);
+@@ -646,25 +1144,26 @@ void __init softirq_init(void)
+ open_softirq(HI_SOFTIRQ, tasklet_hi_action);
+ }
+
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
++void tasklet_unlock_wait(struct tasklet_struct *t)
++{
++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
++ /*
++ * Hack for now to avoid this busy-loop:
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ msleep(1);
++#else
++ barrier();
++#endif
++ }
++}
++EXPORT_SYMBOL(tasklet_unlock_wait);
++#endif
++
+ static int ksoftirqd_should_run(unsigned int cpu)
+ {
+- return local_softirq_pending();
+-}
+-
+-static void run_ksoftirqd(unsigned int cpu)
+-{
+- local_irq_disable();
+- if (local_softirq_pending()) {
+- /*
+- * We can safely run softirq on inline stack, as we are not deep
+- * in the task stack here.
+- */
+- __do_softirq();
+- local_irq_enable();
+- cond_resched_rcu_qs();
+- return;
+- }
+- local_irq_enable();
++ return ksoftirqd_softirq_pending();
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -746,16 +1245,31 @@ static struct notifier_block cpu_nfb = {
+
+ static struct smp_hotplug_thread softirq_threads = {
+ .store = &ksoftirqd,
++ .setup = ksoftirqd_set_sched_params,
+ .thread_should_run = ksoftirqd_should_run,
+ .thread_fn = run_ksoftirqd,
+ .thread_comm = "ksoftirqd/%u",
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static struct smp_hotplug_thread softirq_timer_threads = {
++ .store = &ktimer_softirqd,
++ .setup = ktimer_softirqd_set_sched_params,
++ .cleanup = ktimer_softirqd_clr_sched_params,
++ .thread_should_run = ktimer_softirqd_should_run,
++ .thread_fn = run_ksoftirqd,
++ .thread_comm = "ktimersoftd/%u",
++};
++#endif
++
+ static __init int spawn_ksoftirqd(void)
+ {
+ register_cpu_notifier(&cpu_nfb);
+
+ BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
++#endif
+
+ return 0;
+ }
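
With CONFIG_PREEMPT_RT_FULL this file ends up with two per-CPU softirq threads: ktimersoftd/%u (SCHED_FIFO, priority 1) handles the TIMER and HRTIMER vectors, while ksoftirqd/%u handles the rest. A stand-alone restatement of the routing mask, with vector numbers taken from softirq_to_name[] above:

#include <stdio.h>

/* Vector numbers follow softirq_to_name[] above. */
enum { TIMER_SOFTIRQ = 1, NET_RX_SOFTIRQ = 3, HRTIMER_SOFTIRQ = 8 };
#define TIMER_SOFTIRQS ((1u << TIMER_SOFTIRQ) | (1u << HRTIMER_SOFTIRQ))

/* Model of the RT routing decision: timer-class softirqs are handled
 * by ktimersoftd/%u, everything else by ksoftirqd/%u. */
static const char *thread_for(unsigned int nr)
{
    return ((1u << nr) & TIMER_SOFTIRQS) ? "ktimersoftd" : "ksoftirqd";
}

int main(void)
{
    printf("TIMER   -> %s\n", thread_for(TIMER_SOFTIRQ));
    printf("HRTIMER -> %s\n", thread_for(HRTIMER_SOFTIRQ));
    printf("NET_RX  -> %s\n", thread_for(NET_RX_SOFTIRQ));
    return 0;
}
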
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 4a1ca5f6da7e..3226e22b9e42 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -37,7 +37,7 @@ struct cpu_stop_done {
+ struct cpu_stopper {
+ struct task_struct *thread;
+
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ bool enabled; /* is this stopper enabled? */
+ struct list_head works; /* list of pending works */
+
+@@ -83,14 +83,14 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
+ unsigned long flags;
+ bool enabled;
+
+- spin_lock_irqsave(&stopper->lock, flags);
++ raw_spin_lock_irqsave(&stopper->lock, flags);
+ enabled = stopper->enabled;
+ if (enabled)
+ __cpu_stop_queue_work(stopper, work);
+ else if (work->done)
+ cpu_stop_signal_done(work->done);
+- spin_unlock_irqrestore(&stopper->lock, flags);
+
++ raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ return enabled;
+ }
+
+@@ -232,8 +232,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+ int err;
+
+ lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
+- spin_lock_irq(&stopper1->lock);
+- spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
++ raw_spin_lock_irq(&stopper1->lock);
++ raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+
+ err = -ENOENT;
+ if (!stopper1->enabled || !stopper2->enabled)
+@@ -243,8 +243,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+ __cpu_stop_queue_work(stopper1, work1);
+ __cpu_stop_queue_work(stopper2, work2);
+ unlock:
+- spin_unlock(&stopper2->lock);
+- spin_unlock_irq(&stopper1->lock);
++ raw_spin_unlock(&stopper2->lock);
++ raw_spin_unlock_irq(&stopper1->lock);
+ lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
+
+ return err;
+@@ -321,18 +321,21 @@ static DEFINE_MUTEX(stop_cpus_mutex);
+
+ static bool queue_stop_cpus_work(const struct cpumask *cpumask,
+ cpu_stop_fn_t fn, void *arg,
+- struct cpu_stop_done *done)
++ struct cpu_stop_done *done, bool inactive)
+ {
+ struct cpu_stop_work *work;
+ unsigned int cpu;
+ bool queued = false;
+
+ /*
+- * Disable preemption while queueing to avoid getting
+- * preempted by a stopper which might wait for other stoppers
+- * to enter @fn which can lead to deadlock.
++ * Make sure that all work is queued on all cpus before
++ * any of the cpus can execute it.
+ */
+- lg_global_lock(&stop_cpus_lock);
++ if (!inactive)
++ lg_global_lock(&stop_cpus_lock);
++ else
++ lg_global_trylock_relax(&stop_cpus_lock);
++
+ for_each_cpu(cpu, cpumask) {
+ work = &per_cpu(cpu_stopper.stop_work, cpu);
+ work->fn = fn;
+@@ -352,7 +355,7 @@ static int __stop_cpus(const struct cpumask *cpumask,
+ struct cpu_stop_done done;
+
+ cpu_stop_init_done(&done, cpumask_weight(cpumask));
+- if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
++ if (!queue_stop_cpus_work(cpumask, fn, arg, &done, false))
+ return -ENOENT;
+ wait_for_completion(&done.completion);
+ return done.ret;
+@@ -433,9 +436,9 @@ static int cpu_stop_should_run(unsigned int cpu)
+ unsigned long flags;
+ int run;
+
+- spin_lock_irqsave(&stopper->lock, flags);
++ raw_spin_lock_irqsave(&stopper->lock, flags);
+ run = !list_empty(&stopper->works);
+- spin_unlock_irqrestore(&stopper->lock, flags);
++ raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ return run;
+ }
+
+@@ -446,13 +449,13 @@ static void cpu_stopper_thread(unsigned int cpu)
+
+ repeat:
+ work = NULL;
+- spin_lock_irq(&stopper->lock);
++ raw_spin_lock_irq(&stopper->lock);
+ if (!list_empty(&stopper->works)) {
+ work = list_first_entry(&stopper->works,
+ struct cpu_stop_work, list);
+ list_del_init(&work->list);
+ }
+- spin_unlock_irq(&stopper->lock);
++ raw_spin_unlock_irq(&stopper->lock);
+
+ if (work) {
+ cpu_stop_fn_t fn = work->fn;
+@@ -460,6 +463,16 @@ static void cpu_stopper_thread(unsigned int cpu)
+ struct cpu_stop_done *done = work->done;
+ int ret;
+
++ /*
++			 * Wait until the stopper has finished scheduling on
++			 * all CPUs
++ */
++ lg_global_lock(&stop_cpus_lock);
++ /*
++ * Let other cpu threads continue as well
++ */
++ lg_global_unlock(&stop_cpus_lock);
++
+ /* cpu stop callbacks must not sleep, make in_atomic() == T */
+ preempt_count_inc();
+ ret = fn(arg);
+@@ -526,10 +539,12 @@ static int __init cpu_stop_init(void)
+ for_each_possible_cpu(cpu) {
+ struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+- spin_lock_init(&stopper->lock);
++ raw_spin_lock_init(&stopper->lock);
+ INIT_LIST_HEAD(&stopper->works);
+ }
+
++ lg_lock_init(&stop_cpus_lock, "stop_cpus_lock");
++
+ BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
+ stop_machine_unpark(raw_smp_processor_id());
+ stop_machine_initialized = true;
+@@ -624,7 +639,7 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
+ set_state(&msdata, MULTI_STOP_PREPARE);
+ cpu_stop_init_done(&done, num_active_cpus());
+ queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
+- &done);
++ &done, true);
+ ret = multi_cpu_stop(&msdata);
+
+ /* Busy wait for completion. */
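
The conversion above replaces the old "disable preemption while queueing" trick with an explicit ordering rule: queue_stop_cpus_work() holds stop_cpus_lock while it queues work for every CPU, and each stopper thread passes through the same lock (the lg_global_lock()/lg_global_unlock() pair in cpu_stopper_thread()) before executing, so no stop work starts until all of it is queued. A small pthread model of that gate (all names illustrative):

#include <pthread.h>
#include <stdio.h>

/* Model of the queue-then-run gate: the queuer holds "gate" while it
 * queues work for every worker; each worker passes through the gate
 * (lock + immediate unlock) before executing, so no work starts until
 * all of it is queued. */
static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;
static int queued[2];

static void *worker(void *arg)
{
    long id = (long)arg;

    /* wait until the queuer has finished queueing everywhere */
    pthread_mutex_lock(&gate);
    pthread_mutex_unlock(&gate);
    printf("worker %ld runs, sees queued[0]=%d queued[1]=%d\n",
           id, queued[0], queued[1]);
    return NULL;
}

int main(void)
{
    pthread_t t[2];
    long i;

    pthread_mutex_lock(&gate);
    for (i = 0; i < 2; i++) {
        pthread_create(&t[i], NULL, worker, (void *)i);
        queued[i] = 1;              /* "queue" the work */
    }
    pthread_mutex_unlock(&gate);    /* now everyone may run */
    for (i = 0; i < 2; i++)
        pthread_join(t[i], NULL);
    return 0;
}
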
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 9ba7c820fc23..d85f638fd99e 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -53,6 +53,7 @@
+ #include <asm/uaccess.h>
+
+ #include <trace/events/timer.h>
++#include <trace/events/hist.h>
+
+ #include "tick-internal.h"
+
+@@ -695,6 +696,29 @@ static void hrtimer_switch_to_hres(void)
+ retrigger_next_event(NULL);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++static struct swork_event clock_set_delay_work;
++
++static void run_clock_set_delay(struct swork_event *event)
++{
++ clock_was_set();
++}
++
++void clock_was_set_delayed(void)
++{
++ swork_queue(&clock_set_delay_work);
++}
++
++static __init int create_clock_set_delay_thread(void)
++{
++ WARN_ON(swork_get());
++ INIT_SWORK(&clock_set_delay_work, run_clock_set_delay);
++ return 0;
++}
++early_initcall(create_clock_set_delay_thread);
++#else /* PREEMPT_RT_FULL */
++
+ static void clock_was_set_work(struct work_struct *work)
+ {
+ clock_was_set();
+@@ -710,6 +734,7 @@ void clock_was_set_delayed(void)
+ {
+ schedule_work(&hrtimer_work);
+ }
++#endif
+
+ #else
+
+@@ -719,11 +744,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
+ static inline void hrtimer_switch_to_hres(void) { }
+ static inline void
+ hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+-static inline int hrtimer_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base)
+-{
+- return 0;
+-}
++static inline void hrtimer_reprogram(struct hrtimer *timer,
++ struct hrtimer_clock_base *base) { }
+ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
+ static inline void retrigger_next_event(void *arg) { }
+
+@@ -855,6 +877,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_forward);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
++
++/**
++ * hrtimer_wait_for_timer - Wait for a running timer
++ *
++ * @timer: timer to wait for
++ *
++ * The function waits, in case the timer's callback function is
++ * currently being executed, on the waitqueue of the timer base. The
++ * waitqueue is woken up after the timer callback function has
++ * finished execution.
++ */
++void hrtimer_wait_for_timer(const struct hrtimer *timer)
++{
++ struct hrtimer_clock_base *base = timer->base;
++
++ if (base && base->cpu_base && !timer->irqsafe)
++ wait_event(base->cpu_base->wait,
++ !(hrtimer_callback_running(timer)));
++}
++
++#else
++# define wake_up_timer_waiters(b) do { } while (0)
++#endif
++
+ /*
+ * enqueue_hrtimer - internal function to (re)start a timer
+ *
+@@ -896,6 +944,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
+ if (!(state & HRTIMER_STATE_ENQUEUED))
+ return;
+
++ if (unlikely(!list_empty(&timer->cb_entry))) {
++ list_del_init(&timer->cb_entry);
++ return;
++ }
++
+ if (!timerqueue_del(&base->active, &timer->node))
+ cpu_base->active_bases &= ~(1 << base->index);
+
+@@ -991,7 +1044,16 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
+ timer_stats_hrtimer_set_start_info(timer);
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ {
++ ktime_t now = new_base->get_time();
+
++ if (ktime_to_ns(tim) < ktime_to_ns(now))
++ timer->praecox = now;
++ else
++ timer->praecox = ktime_set(0, 0);
++ }
++#endif
+ leftmost = enqueue_hrtimer(timer, new_base);
+ if (!leftmost)
+ goto unlock;
+@@ -1063,7 +1125,7 @@ int hrtimer_cancel(struct hrtimer *timer)
+
+ if (ret >= 0)
+ return ret;
+- cpu_relax();
++ hrtimer_wait_for_timer(timer);
+ }
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_cancel);
+@@ -1127,6 +1189,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+
+ base = hrtimer_clockid_to_base(clock_id);
+ timer->base = &cpu_base->clock_base[base];
++ INIT_LIST_HEAD(&timer->cb_entry);
+ timerqueue_init(&timer->node);
+
+ #ifdef CONFIG_TIMER_STATS
+@@ -1167,6 +1230,7 @@ bool hrtimer_active(const struct hrtimer *timer)
+ seq = raw_read_seqcount_begin(&cpu_base->seq);
+
+ if (timer->state != HRTIMER_STATE_INACTIVE ||
++ cpu_base->running_soft == timer ||
+ cpu_base->running == timer)
+ return true;
+
+@@ -1265,10 +1329,112 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
+ cpu_base->running = NULL;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
++ struct hrtimer_clock_base *base)
++{
++ int leftmost;
++
++ if (restart != HRTIMER_NORESTART &&
++ !(timer->state & HRTIMER_STATE_ENQUEUED)) {
++
++ leftmost = enqueue_hrtimer(timer, base);
++ if (!leftmost)
++ return;
++#ifdef CONFIG_HIGH_RES_TIMERS
++ if (!hrtimer_is_hres_active(timer)) {
++ /*
++ * Kick to reschedule the next tick to handle the new timer
++ * on dynticks target.
++ */
++ if (base->cpu_base->nohz_active)
++ wake_up_nohz_cpu(base->cpu_base->cpu);
++ } else {
++
++ hrtimer_reprogram(timer, base);
++ }
++#endif
++ }
++}
++
++/*
++ * The changes in mainline which removed the callback modes from
++ * hrtimer are not yet working with -rt. The non-wakeup_process()
++ * based callbacks, which involve sleeping locks, need to be treated
++ * separately.
++ */
++static void hrtimer_rt_run_pending(void)
++{
++ enum hrtimer_restart (*fn)(struct hrtimer *);
++ struct hrtimer_cpu_base *cpu_base;
++ struct hrtimer_clock_base *base;
++ struct hrtimer *timer;
++ int index, restart;
++
++ local_irq_disable();
++ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
++
++ raw_spin_lock(&cpu_base->lock);
++
++ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
++ base = &cpu_base->clock_base[index];
++
++ while (!list_empty(&base->expired)) {
++ timer = list_first_entry(&base->expired,
++ struct hrtimer, cb_entry);
++
++ /*
++ * Same as the above __run_hrtimer function
++			 * except that we run with interrupts enabled.
++ */
++ debug_deactivate(timer);
++ cpu_base->running_soft = timer;
++ raw_write_seqcount_barrier(&cpu_base->seq);
++
++ __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
++ timer_stats_account_hrtimer(timer);
++ fn = timer->function;
++
++ raw_spin_unlock_irq(&cpu_base->lock);
++ restart = fn(timer);
++ raw_spin_lock_irq(&cpu_base->lock);
++
++ hrtimer_rt_reprogram(restart, timer, base);
++ raw_write_seqcount_barrier(&cpu_base->seq);
++
++ WARN_ON_ONCE(cpu_base->running_soft != timer);
++ cpu_base->running_soft = NULL;
++ }
++ }
++
++ raw_spin_unlock_irq(&cpu_base->lock);
++
++ wake_up_timer_waiters(cpu_base);
++}
++
++static int hrtimer_rt_defer(struct hrtimer *timer)
++{
++ if (timer->irqsafe)
++ return 0;
++
++ __remove_hrtimer(timer, timer->base, timer->state, 0);
++ list_add_tail(&timer->cb_entry, &timer->base->expired);
++ return 1;
++}
++
++#else
++
++static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
++
++#endif
++
++static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
++
+ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+ {
+ struct hrtimer_clock_base *base = cpu_base->clock_base;
+ unsigned int active = cpu_base->active_bases;
++ int raise = 0;
+
+ for (; active; base++, active >>= 1) {
+ struct timerqueue_node *node;
+@@ -1284,6 +1450,15 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+
+ timer = container_of(node, struct hrtimer, node);
+
++ trace_hrtimer_interrupt(raw_smp_processor_id(),
++ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
++ timer->praecox : hrtimer_get_expires(timer),
++ basenow)),
++ current,
++ timer->function == hrtimer_wakeup ?
++ container_of(timer, struct hrtimer_sleeper,
++ timer)->task : NULL);
++
+ /*
+ * The immediate goal for using the softexpires is
+ * minimizing wakeups, not running timers at the
+@@ -1299,9 +1474,14 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+ if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
+ break;
+
+- __run_hrtimer(cpu_base, base, timer, &basenow);
++ if (!hrtimer_rt_defer(timer))
++ __run_hrtimer(cpu_base, base, timer, &basenow);
++ else
++ raise = 1;
+ }
+ }
++ if (raise)
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
+
+ #ifdef CONFIG_HIGH_RES_TIMERS
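
Under CONFIG_PREEMPT_RT_BASE, whether a timer expires in hard-irq context or is deferred to the HRTIMER_SOFTIRQ list is decided solely by timer->irqsafe, as hrtimer_rt_defer() above shows. A sketch of how a user opts in, mirroring the tick-sched and broadcast hunks later in this patch (my_dev and my_cb are hypothetical; the irqsafe field is from this patch):

/* Sketch of opting a timer into hard-irq expiry under PREEMPT_RT_BASE. */
static enum hrtimer_restart my_cb(struct hrtimer *t)
{
    /* runs with interrupts disabled: must not take sleeping locks */
    return HRTIMER_NORESTART;
}

static void my_dev_setup_timer(struct my_dev *dev)
{
    hrtimer_init(&dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    dev->timer.function = my_cb;
    dev->timer.irqsafe = 1;    /* otherwise deferred to the softirq list */
}
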
+@@ -1464,16 +1644,18 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
+ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
+ {
+ sl->timer.function = hrtimer_wakeup;
++ sl->timer.irqsafe = 1;
+ sl->task = task;
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
+
+-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
++static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
++ unsigned long state)
+ {
+ hrtimer_init_sleeper(t, current);
+
+ do {
+- set_current_state(TASK_INTERRUPTIBLE);
++ set_current_state(state);
+ hrtimer_start_expires(&t->timer, mode);
+
+ if (likely(t->task))
+@@ -1515,7 +1697,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
+ HRTIMER_MODE_ABS);
+ hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
+
+- if (do_nanosleep(&t, HRTIMER_MODE_ABS))
++ /* cpu_chill() does not care about restart state. */
++ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
+ goto out;
+
+ rmtp = restart->nanosleep.rmtp;
+@@ -1532,8 +1715,10 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
+ return ret;
+ }
+
+-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+- const enum hrtimer_mode mode, const clockid_t clockid)
++static long
++__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
++ const enum hrtimer_mode mode, const clockid_t clockid,
++ unsigned long state)
+ {
+ struct restart_block *restart;
+ struct hrtimer_sleeper t;
+@@ -1546,7 +1731,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+
+ hrtimer_init_on_stack(&t.timer, clockid, mode);
+ hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
+- if (do_nanosleep(&t, mode))
++ if (do_nanosleep(&t, mode, state))
+ goto out;
+
+ /* Absolute timers do not update the rmtp value and restart: */
+@@ -1573,6 +1758,12 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+ return ret;
+ }
+
++long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
++ const enum hrtimer_mode mode, const clockid_t clockid)
++{
++ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
++}
++
+ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+ struct timespec __user *, rmtp)
+ {
+@@ -1587,6 +1778,26 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+ return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * Sleep for 1 ms in the hope that whoever holds what we want will let it go.
++ */
++void cpu_chill(void)
++{
++ struct timespec tu = {
++ .tv_nsec = NSEC_PER_MSEC,
++ };
++ unsigned int freeze_flag = current->flags & PF_NOFREEZE;
++
++ current->flags |= PF_NOFREEZE;
++ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
++ TASK_UNINTERRUPTIBLE);
++ if (!freeze_flag)
++ current->flags &= ~PF_NOFREEZE;
++}
++EXPORT_SYMBOL(cpu_chill);
++#endif
++
+ /*
+ * Functions related to boot-time initialization:
+ */
+@@ -1598,10 +1809,14 @@ int hrtimers_prepare_cpu(unsigned int cpu)
+ for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+ cpu_base->clock_base[i].cpu_base = cpu_base;
+ timerqueue_init_head(&cpu_base->clock_base[i].active);
++ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
+ }
+
+ cpu_base->cpu = cpu;
+ hrtimer_init_hres(cpu_base);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ init_waitqueue_head(&cpu_base->wait);
++#endif
+ return 0;
+ }
+
+@@ -1671,9 +1886,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
+
+ #endif /* CONFIG_HOTPLUG_CPU */
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++
++static void run_hrtimer_softirq(struct softirq_action *h)
++{
++ hrtimer_rt_run_pending();
++}
++
++static void hrtimers_open_softirq(void)
++{
++ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
++}
++
++#else
++static void hrtimers_open_softirq(void) { }
++#endif
++
+ void __init hrtimers_init(void)
+ {
+ hrtimers_prepare_cpu(smp_processor_id());
++ hrtimers_open_softirq();
+ }
+
+ /**
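
cpu_chill() exists to replace busy-wait retry loops that can livelock on RT: if the lock holder has been preempted, spinning with cpu_relax() makes no progress, while a 1 ms sleep lets the holder run. A sketch of the intended usage pattern (try_grab_resource() and its context type are hypothetical):

/* Illustrative retry loop; try_grab_resource() and struct resource_ctx
 * are hypothetical stand-ins for a trylock-style operation. */
static void grab_resource(struct resource_ctx *res)
{
    while (!try_grab_resource(res)) {
#ifdef CONFIG_PREEMPT_RT_FULL
        cpu_chill();    /* sleep ~1ms so the holder can make progress */
#else
        cpu_relax();
#endif
    }
}
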
+diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
+index 1d5c7204ddc9..184de6751180 100644
+--- a/kernel/time/itimer.c
++++ b/kernel/time/itimer.c
+@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
+ /* We are sharing ->siglock with it_real_fn() */
+ if (hrtimer_try_to_cancel(timer) < 0) {
+ spin_unlock_irq(&tsk->sighand->siglock);
++ hrtimer_wait_for_timer(&tsk->signal->real_timer);
+ goto again;
+ }
+ expires = timeval_to_ktime(value->it_value);
+diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
+index 555e21f7b966..a5d6435fabbb 100644
+--- a/kernel/time/jiffies.c
++++ b/kernel/time/jiffies.c
+@@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = {
+ .max_cycles = 10,
+ };
+
+-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
++__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
++__cacheline_aligned_in_smp seqcount_t jiffies_seq;
+
+ #if (BITS_PER_LONG < 64)
+ u64 get_jiffies_64(void)
+@@ -83,9 +84,9 @@ u64 get_jiffies_64(void)
+ u64 ret;
+
+ do {
+- seq = read_seqbegin(&jiffies_lock);
++ seq = read_seqcount_begin(&jiffies_seq);
+ ret = jiffies_64;
+- } while (read_seqretry(&jiffies_lock, seq));
++ } while (read_seqcount_retry(&jiffies_seq, seq));
+ return ret;
+ }
+ EXPORT_SYMBOL(get_jiffies_64);
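
The jiffies_lock seqlock is split here into a raw spinlock for writers plus a bare seqcount for readers, so readers never block on an rtmutex-converted lock. A simplified stand-alone model of the resulting read/write protocol (single writer; a demonstration, not a memory-model-faithful seqlock):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* The writer (who would hold the raw spinlock in the patch) bumps the
 * sequence to odd, updates, then bumps it back to even; readers retry
 * on an odd or changed sequence, like get_jiffies_64() above. */
static atomic_uint seq;
static uint64_t jiffies64;

static void write_tick(void)
{
    atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);
    jiffies64++;
    atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);
}

static uint64_t read_jiffies64(void)
{
    unsigned int s;
    uint64_t v;

    do {
        while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
            ;    /* writer in progress */
        v = jiffies64;
    } while (atomic_load_explicit(&seq, memory_order_acquire) != s);
    return v;
}

int main(void)
{
    write_tick();
    printf("%llu\n", (unsigned long long)read_jiffies64());
    return 0;
}
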
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 6df8927c58a5..05b7391bf9bd 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -17,6 +17,7 @@
+ #include <linux/module.h>
+ #include <linux/rtc.h>
+ #include <linux/math64.h>
++#include <linux/swork.h>
+
+ #include "ntp_internal.h"
+ #include "timekeeping_internal.h"
+@@ -568,10 +569,35 @@ static void sync_cmos_clock(struct work_struct *work)
+ &sync_cmos_work, timespec64_to_jiffies(&next));
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++static void run_clock_set_delay(struct swork_event *event)
++{
++ queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
++}
++
++static struct swork_event ntp_cmos_swork;
++
++void ntp_notify_cmos_timer(void)
++{
++ swork_queue(&ntp_cmos_swork);
++}
++
++static __init int create_cmos_delay_thread(void)
++{
++ WARN_ON(swork_get());
++ INIT_SWORK(&ntp_cmos_swork, run_clock_set_delay);
++ return 0;
++}
++early_initcall(create_cmos_delay_thread);
++
++#else
++
+ void ntp_notify_cmos_timer(void)
+ {
+ queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
+ }
++#endif /* CONFIG_PREEMPT_RT_FULL */
+
+ #else
+ void ntp_notify_cmos_timer(void) { }
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 39008d78927a..633f4eaca9e7 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -3,6 +3,7 @@
+ */
+
+ #include <linux/sched.h>
++#include <linux/sched/rt.h>
+ #include <linux/posix-timers.h>
+ #include <linux/errno.h>
+ #include <linux/math64.h>
+@@ -620,7 +621,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
+ /*
+ * Disarm any old timer after extracting its expiry time.
+ */
+- WARN_ON_ONCE(!irqs_disabled());
++ WARN_ON_ONCE_NONRT(!irqs_disabled());
+
+ ret = 0;
+ old_incr = timer->it.cpu.incr;
+@@ -1064,7 +1065,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
+ /*
+ * Now re-arm for the new expiry time.
+ */
+- WARN_ON_ONCE(!irqs_disabled());
++ WARN_ON_ONCE_NONRT(!irqs_disabled());
+ arm_timer(timer);
+ unlock_task_sighand(p, &flags);
+
+@@ -1153,13 +1154,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
+ * already updated our counts. We need to check if any timers fire now.
+ * Interrupts are disabled.
+ */
+-void run_posix_cpu_timers(struct task_struct *tsk)
++static void __run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ LIST_HEAD(firing);
+ struct k_itimer *timer, *next;
+ unsigned long flags;
+
+- WARN_ON_ONCE(!irqs_disabled());
++ WARN_ON_ONCE_NONRT(!irqs_disabled());
+
+ /*
+ * The fast path checks that there are no expired thread or thread
+@@ -1213,6 +1214,190 @@ void run_posix_cpu_timers(struct task_struct *tsk)
+ }
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++#include <linux/kthread.h>
++#include <linux/cpu.h>
++DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++
++static int posix_cpu_timers_thread(void *data)
++{
++ int cpu = (long)data;
++
++ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
++
++ while (!kthread_should_stop()) {
++ struct task_struct *tsk = NULL;
++ struct task_struct *next = NULL;
++
++ if (cpu_is_offline(cpu))
++ goto wait_to_die;
++
++ /* grab task list */
++ raw_local_irq_disable();
++ tsk = per_cpu(posix_timer_tasklist, cpu);
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++ raw_local_irq_enable();
++
++		/* it's possible the list is empty; just return */
++ if (!tsk) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ __set_current_state(TASK_RUNNING);
++ continue;
++ }
++
++ /* Process task list */
++ while (1) {
++ /* save next */
++ next = tsk->posix_timer_list;
++
++			/* run the task's timers, clear its ptr and
++ * unreference it
++ */
++ __run_posix_cpu_timers(tsk);
++ tsk->posix_timer_list = NULL;
++ put_task_struct(tsk);
++
++ /* check if this is the last on the list */
++ if (next == tsk)
++ break;
++ tsk = next;
++ }
++ }
++ return 0;
++
++wait_to_die:
++ /* Wait for kthread_stop */
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}
++
++static inline int __fastpath_timer_check(struct task_struct *tsk)
++{
++ /* tsk == current, ensure it is safe to use ->signal/sighand */
++ if (unlikely(tsk->exit_state))
++ return 0;
++
++ if (!task_cputime_zero(&tsk->cputime_expires))
++ return 1;
++
++ if (!task_cputime_zero(&tsk->signal->cputime_expires))
++ return 1;
++
++ return 0;
++}
++
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ unsigned long cpu = smp_processor_id();
++ struct task_struct *tasklist;
++
++ BUG_ON(!irqs_disabled());
++ if(!per_cpu(posix_timer_task, cpu))
++ return;
++ /* get per-cpu references */
++ tasklist = per_cpu(posix_timer_tasklist, cpu);
++
++ /* check to see if we're already queued */
++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
++ get_task_struct(tsk);
++ if (tasklist) {
++ tsk->posix_timer_list = tasklist;
++ } else {
++ /*
++ * The list is terminated by a self-pointing
++ * task_struct
++ */
++ tsk->posix_timer_list = tsk;
++ }
++ per_cpu(posix_timer_tasklist, cpu) = tsk;
++
++ wake_up_process(per_cpu(posix_timer_task, cpu));
++ }
++}
++
++/*
++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
++ * Here we can start up the necessary posix timer thread for the new CPU.
++ */
++static int posix_cpu_thread_call(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ int cpu = (long)hcpu;
++ struct task_struct *p;
++ struct sched_param param;
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ p = kthread_create(posix_cpu_timers_thread, hcpu,
++ "posixcputmr/%d",cpu);
++ if (IS_ERR(p))
++ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
++ kthread_bind(p, cpu);
++ /* Must be high prio to avoid getting starved */
++ param.sched_priority = MAX_RT_PRIO-1;
++ sched_setscheduler(p, SCHED_FIFO, &param);
++ per_cpu(posix_timer_task,cpu) = p;
++ break;
++ case CPU_ONLINE:
++		/* Strictly unnecessary, as the first user will wake it. */
++ wake_up_process(per_cpu(posix_timer_task,cpu));
++ break;
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_UP_CANCELED:
++		/* Unbind it from the offline cpu so it can run. Fall through. */
++ kthread_bind(per_cpu(posix_timer_task, cpu),
++ cpumask_any(cpu_online_mask));
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++ case CPU_DEAD:
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++#endif
++ }
++ return NOTIFY_OK;
++}
++
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block posix_cpu_thread_notifier = {
++ .notifier_call = posix_cpu_thread_call,
++ .priority = 10
++};
++
++static int __init posix_cpu_thread_init(void)
++{
++ void *hcpu = (void *)(long)smp_processor_id();
++ /* Start one for boot CPU. */
++ unsigned long cpu;
++
++ /* init the per-cpu posix_timer_tasklets */
++ for_each_possible_cpu(cpu)
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
++ register_cpu_notifier(&posix_cpu_thread_notifier);
++ return 0;
++}
++early_initcall(posix_cpu_thread_init);
++#else /* CONFIG_PREEMPT_RT_BASE */
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ __run_posix_cpu_timers(tsk);
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ /*
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+ * The tsk->sighand->siglock must be held by the caller.
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index f2826c35e918..464a98155a0e 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -506,6 +506,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
+ static struct pid *good_sigevent(sigevent_t * event)
+ {
+ struct task_struct *rtn = current->group_leader;
++ int sig = event->sigev_signo;
+
+ if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
+ (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
+@@ -514,7 +515,8 @@ static struct pid *good_sigevent(sigevent_t * event)
+ return NULL;
+
+ if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
+- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
++ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
++ sig_kernel_coredump(sig)))
+ return NULL;
+
+ return task_pid(rtn);
+@@ -826,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
+ return overrun;
+ }
+
++/*
++ * Protected by RCU!
++ */
++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (kc->timer_set == common_timer_set)
++ hrtimer_wait_for_timer(&timr->it.real.timer);
++ else
++ /* FIXME: Whacky hack for posix-cpu-timers */
++ schedule_timeout(1);
++#endif
++}
++
+ /* Set a POSIX.1b interval timer. */
+ /* timr->it_lock is taken. */
+ static int
+@@ -903,6 +919,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+ if (!timr)
+ return -EINVAL;
+
++ rcu_read_lock();
+ kc = clockid_to_kclock(timr->it_clock);
+ if (WARN_ON_ONCE(!kc || !kc->timer_set))
+ error = -EINVAL;
+@@ -911,9 +928,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+
+ unlock_timer(timr, flag);
+ if (error == TIMER_RETRY) {
++ timer_wait_for_callback(kc, timr);
+ rtn = NULL; // We already got the old time...
++ rcu_read_unlock();
+ goto retry;
+ }
++ rcu_read_unlock();
+
+ if (old_setting && !error &&
+ copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
+@@ -951,10 +971,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
+ if (!timer)
+ return -EINVAL;
+
++ rcu_read_lock();
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
+ unlock_timer(timer, flags);
++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++ timer);
++ rcu_read_unlock();
+ goto retry_delete;
+ }
++ rcu_read_unlock();
+
+ spin_lock(&current->sighand->siglock);
+ list_del(&timer->list);
+@@ -980,8 +1005,18 @@ static void itimer_delete(struct k_itimer *timer)
+ retry_delete:
+ spin_lock_irqsave(&timer->it_lock, flags);
+
+- if (timer_delete_hook(timer) == TIMER_RETRY) {
++ /* On RT we can race with a deletion */
++ if (!timer->it_signal) {
+ unlock_timer(timer, flags);
++ return;
++ }
++
++ if (timer_delete_hook(timer) == TIMER_RETRY) {
++ rcu_read_lock();
++ unlock_timer(timer, flags);
++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++ timer);
++ rcu_read_unlock();
+ goto retry_delete;
+ }
+ list_del(&timer->list);
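
The retry paths added to timer_settime() and timer_delete() above all follow one pattern: drop the timer lock, wait for the running callback outside it under rcu_read_lock() so the timer cannot be freed meanwhile, then retry. Schematically (the lock and hook helpers below are hypothetical stand-ins for the it_lock handling and the k_clock hook calls):

/* Schematic only: lock_the_timer()/unlock_the_timer()/run_timer_hook()
 * are hypothetical; timer_wait_for_callback() is from this patch. */
static void modify_timer_rt(struct k_itimer *timr)
{
    for (;;) {
        lock_the_timer(timr);
        rcu_read_lock();    /* timer cannot be freed while we wait */
        if (run_timer_hook(timr) != TIMER_RETRY) {
            rcu_read_unlock();
            unlock_the_timer(timr);
            return;
        }
        unlock_the_timer(timr);
        /* wait for the running callback with the timer lock dropped */
        timer_wait_for_callback(clockid_to_kclock(timr->it_clock), timr);
        rcu_read_unlock();
    }
}
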
+diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
+index 690b797f522e..fe8ba1619879 100644
+--- a/kernel/time/tick-broadcast-hrtimer.c
++++ b/kernel/time/tick-broadcast-hrtimer.c
+@@ -107,5 +107,6 @@ void tick_setup_hrtimer_broadcast(void)
+ {
+ hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ bctimer.function = bc_handler;
++ bctimer.irqsafe = true;
+ clockevents_register_device(&ce_broadcast_hrtimer);
+ }
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index 4fcd99e12aa0..5a47f2e98faf 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -79,13 +79,15 @@ int tick_is_oneshot_available(void)
+ static void tick_periodic(int cpu)
+ {
+ if (tick_do_timer_cpu == cpu) {
+- write_seqlock(&jiffies_lock);
++ raw_spin_lock(&jiffies_lock);
++ write_seqcount_begin(&jiffies_seq);
+
+ /* Keep track of the next tick event */
+ tick_next_period = ktime_add(tick_next_period, tick_period);
+
+ do_timer(1);
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ update_wall_time();
+ }
+
+@@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
+ ktime_t next;
+
+ do {
+- seq = read_seqbegin(&jiffies_lock);
++ seq = read_seqcount_begin(&jiffies_seq);
+ next = tick_next_period;
+- } while (read_seqretry(&jiffies_lock, seq));
++ } while (read_seqcount_retry(&jiffies_seq, seq));
+
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
+
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 2ec7c00228f3..c1b30b8c671a 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(ktime_t now)
+ return;
+
+ /* Reevaluate with jiffies_lock held */
+- write_seqlock(&jiffies_lock);
++ raw_spin_lock(&jiffies_lock);
++ write_seqcount_begin(&jiffies_seq);
+
+ delta = ktime_sub(now, last_jiffies_update);
+ if (delta.tv64 >= tick_period.tv64) {
+@@ -85,10 +86,12 @@ static void tick_do_update_jiffies64(ktime_t now)
+ /* Keep the tick_next_period variable up to date */
+ tick_next_period = ktime_add(last_jiffies_update, tick_period);
+ } else {
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ return;
+ }
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ update_wall_time();
+ }
+
+@@ -99,12 +102,14 @@ static ktime_t tick_init_jiffy_update(void)
+ {
+ ktime_t period;
+
+- write_seqlock(&jiffies_lock);
++ raw_spin_lock(&jiffies_lock);
++ write_seqcount_begin(&jiffies_seq);
+ /* Did we start the jiffies update yet ? */
+ if (last_jiffies_update.tv64 == 0)
+ last_jiffies_update = tick_next_period;
+ period = last_jiffies_update;
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ return period;
+ }
+
+@@ -212,6 +217,7 @@ static void nohz_full_kick_func(struct irq_work *work)
+
+ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
+ .func = nohz_full_kick_func,
++ .flags = IRQ_WORK_HARD_IRQ,
+ };
+
+ /*
+@@ -670,10 +676,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+
+ /* Read jiffies and the time when jiffies were updated last */
+ do {
+- seq = read_seqbegin(&jiffies_lock);
++ seq = read_seqcount_begin(&jiffies_seq);
+ basemono = last_jiffies_update.tv64;
+ basejiff = jiffies;
+- } while (read_seqretry(&jiffies_lock, seq));
++ } while (read_seqcount_retry(&jiffies_seq, seq));
+ ts->last_jiffies = basejiff;
+
+ if (rcu_needs_cpu(basemono, &next_rcu) ||
+@@ -874,14 +880,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+ return false;
+
+ if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+- static int ratelimit;
+-
+- if (ratelimit < 10 &&
+- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+- pr_warn("NOHZ: local_softirq_pending %02x\n",
+- (unsigned int) local_softirq_pending());
+- ratelimit++;
+- }
++ softirq_check_pending_idle();
+ return false;
+ }
+
+@@ -1190,6 +1189,7 @@ void tick_setup_sched_timer(void)
+ * Emulate tick processing via per-CPU hrtimers:
+ */
+ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ ts->sched_timer.irqsafe = 1;
+ ts->sched_timer.function = tick_sched_timer;
+
+ /* Get the next period (per-CPU) */
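Two RT-specific knobs appear in this file: ts->sched_timer.irqsafe uses the same hrtimer flag as the broadcast timer earlier, and IRQ_WORK_HARD_IRQ is a flag this patch introduces so selected irq_work items still execute from the hard interrupt on RT, where generic irq_work is otherwise pushed into softirq context. Declaring such an item follows the nohz kick above (flag name assumed from this patch, not mainline 4.8):

#include <linux/irq_work.h>

static void demo_kick(struct irq_work *work)
{
	/* must be short and non-sleeping: runs with interrupts off */
}

static struct irq_work demo_work = {
	.func	= demo_kick,
	.flags	= IRQ_WORK_HARD_IRQ,	/* never deferred, even on RT */
};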
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 37dec7e3db43..fa8d90d2acc3 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -2328,8 +2328,10 @@ EXPORT_SYMBOL(hardpps);
+ */
+ void xtime_update(unsigned long ticks)
+ {
+- write_seqlock(&jiffies_lock);
++ raw_spin_lock(&jiffies_lock);
++ write_seqcount_begin(&jiffies_seq);
+ do_timer(ticks);
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ update_wall_time();
+ }
+diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
+index 704f595ce83f..763a3e5121ff 100644
+--- a/kernel/time/timekeeping.h
++++ b/kernel/time/timekeeping.h
+@@ -19,7 +19,8 @@ extern void timekeeping_resume(void);
+ extern void do_timer(unsigned long ticks);
+ extern void update_wall_time(void);
+
+-extern seqlock_t jiffies_lock;
++extern raw_spinlock_t jiffies_lock;
++extern seqcount_t jiffies_seq;
+
+ #define CS_NAME_LEN 32
+
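The matching definition side of these externs lives in kernel/time/jiffies.c, which an earlier hunk of this patch converts; it is expected to look roughly like the following (reproduced from the pattern, not verbatim):

__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
__cacheline_aligned_in_smp seqcount_t jiffies_seq;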
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 32bf6f75a8fe..ba53447a03f5 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -193,8 +193,11 @@ EXPORT_SYMBOL(jiffies_64);
+ #endif
+
+ struct timer_base {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct timer_list *running_timer;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ struct swait_queue_head wait_for_running_timer;
++#endif
+ unsigned long clk;
+ unsigned long next_expiry;
+ unsigned int cpu;
+@@ -947,10 +950,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
+
+ if (!(tf & TIMER_MIGRATING)) {
+ base = get_timer_base(tf);
+- spin_lock_irqsave(&base->lock, *flags);
++ raw_spin_lock_irqsave(&base->lock, *flags);
+ if (timer->flags == tf)
+ return base;
+- spin_unlock_irqrestore(&base->lock, *flags);
++ raw_spin_unlock_irqrestore(&base->lock, *flags);
+ }
+ cpu_relax();
+ }
+@@ -1017,9 +1020,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ /* See the comment in lock_timer_base() */
+ timer->flags |= TIMER_MIGRATING;
+
+- spin_unlock(&base->lock);
++ raw_spin_unlock(&base->lock);
+ base = new_base;
+- spin_lock(&base->lock);
++ raw_spin_lock(&base->lock);
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | base->cpu);
+ }
+@@ -1040,7 +1043,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ }
+
+ out_unlock:
+- spin_unlock_irqrestore(&base->lock, flags);
++ raw_spin_unlock_irqrestore(&base->lock, flags);
+
+ return ret;
+ }
+@@ -1134,19 +1137,46 @@ void add_timer_on(struct timer_list *timer, int cpu)
+ if (base != new_base) {
+ timer->flags |= TIMER_MIGRATING;
+
+- spin_unlock(&base->lock);
++ raw_spin_unlock(&base->lock);
+ base = new_base;
+- spin_lock(&base->lock);
++ raw_spin_lock(&base->lock);
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | cpu);
+ }
+
+ debug_activate(timer, timer->expires);
+ internal_add_timer(base, timer);
+- spin_unlock_irqrestore(&base->lock, flags);
++ raw_spin_unlock_irqrestore(&base->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(add_timer_on);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * Wait for a running timer
++ */
++static void wait_for_running_timer(struct timer_list *timer)
++{
++ struct timer_base *base;
++ u32 tf = timer->flags;
++
++ if (tf & TIMER_MIGRATING)
++ return;
++
++ base = get_timer_base(tf);
++ swait_event(base->wait_for_running_timer,
++ base->running_timer != timer);
++}
++
++# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer)
++#else
++static inline void wait_for_running_timer(struct timer_list *timer)
++{
++ cpu_relax();
++}
++
++# define wakeup_timer_waiters(b) do { } while (0)
++#endif
++
+ /**
+ * del_timer - deactivate a timer.
+ * @timer: the timer to be deactivated
+@@ -1170,7 +1200,7 @@ int del_timer(struct timer_list *timer)
+ if (timer_pending(timer)) {
+ base = lock_timer_base(timer, &flags);
+ ret = detach_if_pending(timer, base, true);
+- spin_unlock_irqrestore(&base->lock, flags);
++ raw_spin_unlock_irqrestore(&base->lock, flags);
+ }
+
+ return ret;
+@@ -1198,13 +1228,13 @@ int try_to_del_timer_sync(struct timer_list *timer)
+ timer_stats_timer_clear_start_info(timer);
+ ret = detach_if_pending(timer, base, true);
+ }
+- spin_unlock_irqrestore(&base->lock, flags);
++ raw_spin_unlock_irqrestore(&base->lock, flags);
+
+ return ret;
+ }
+ EXPORT_SYMBOL(try_to_del_timer_sync);
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ /**
+ * del_timer_sync - deactivate a timer and wait for the handler to finish.
+ * @timer: the timer to be deactivated
+@@ -1264,7 +1294,7 @@ int del_timer_sync(struct timer_list *timer)
+ int ret = try_to_del_timer_sync(timer);
+ if (ret >= 0)
+ return ret;
+- cpu_relax();
++ wait_for_running_timer(timer);
+ }
+ }
+ EXPORT_SYMBOL(del_timer_sync);
+@@ -1329,14 +1359,17 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
+ fn = timer->function;
+ data = timer->data;
+
+- if (timer->flags & TIMER_IRQSAFE) {
+- spin_unlock(&base->lock);
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) &&
++ timer->flags & TIMER_IRQSAFE) {
++ raw_spin_unlock(&base->lock);
+ call_timer_fn(timer, fn, data);
+- spin_lock(&base->lock);
++ base->running_timer = NULL;
++ raw_spin_lock(&base->lock);
+ } else {
+- spin_unlock_irq(&base->lock);
++ raw_spin_unlock_irq(&base->lock);
+ call_timer_fn(timer, fn, data);
+- spin_lock_irq(&base->lock);
++ base->running_timer = NULL;
++ raw_spin_lock_irq(&base->lock);
+ }
+ }
+ }
+@@ -1505,7 +1538,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+ if (cpu_is_offline(smp_processor_id()))
+ return expires;
+
+- spin_lock(&base->lock);
++ raw_spin_lock(&base->lock);
+ nextevt = __next_timer_interrupt(base);
+ is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
+ base->next_expiry = nextevt;
+@@ -1529,7 +1562,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+ if ((expires - basem) > TICK_NSEC)
+ base->is_idle = true;
+ }
+- spin_unlock(&base->lock);
++ raw_spin_unlock(&base->lock);
+
+ return cmp_next_hrtimer_event(basem, expires);
+ }
+@@ -1594,13 +1627,13 @@ void update_process_times(int user_tick)
+
+ /* Note: this timer irq context must be accounted for as well. */
+ account_process_tick(p, user_tick);
++ scheduler_tick();
+ run_local_timers();
+ rcu_check_callbacks(user_tick);
+-#ifdef CONFIG_IRQ_WORK
++#if defined(CONFIG_IRQ_WORK)
+ if (in_irq())
+ irq_work_tick();
+ #endif
+- scheduler_tick();
+ run_posix_cpu_timers(p);
+ }
+
+@@ -1616,7 +1649,7 @@ static inline void __run_timers(struct timer_base *base)
+ if (!time_after_eq(jiffies, base->clk))
+ return;
+
+- spin_lock_irq(&base->lock);
++ raw_spin_lock_irq(&base->lock);
+
+ while (time_after_eq(jiffies, base->clk)) {
+
+@@ -1626,8 +1659,8 @@ static inline void __run_timers(struct timer_base *base)
+ while (levels--)
+ expire_timers(base, heads + levels);
+ }
+- base->running_timer = NULL;
+- spin_unlock_irq(&base->lock);
++ raw_spin_unlock_irq(&base->lock);
++ wakeup_timer_waiters(base);
+ }
+
+ /*
+@@ -1637,6 +1670,8 @@ static void run_timer_softirq(struct softirq_action *h)
+ {
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
++ irq_work_tick_soft();
++
+ __run_timers(base);
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+ __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+@@ -1822,16 +1857,16 @@ int timers_dead_cpu(unsigned int cpu)
+ * The caller is globally serialized and nobody else
+ * takes two locks at once, deadlock is not possible.
+ */
+- spin_lock_irq(&new_base->lock);
+- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
++ raw_spin_lock_irq(&new_base->lock);
++ raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+ BUG_ON(old_base->running_timer);
+
+ for (i = 0; i < WHEEL_SIZE; i++)
+ migrate_timer_list(new_base, old_base->vectors + i);
+
+- spin_unlock(&old_base->lock);
+- spin_unlock_irq(&new_base->lock);
++ raw_spin_unlock(&old_base->lock);
++ raw_spin_unlock_irq(&new_base->lock);
+ put_cpu_ptr(&timer_bases);
+ }
+ return 0;
+@@ -1847,8 +1882,11 @@ static void __init init_timer_cpu(int cpu)
+ for (i = 0; i < NR_BASES; i++) {
+ base = per_cpu_ptr(&timer_bases[i], cpu);
+ base->cpu = cpu;
+- spin_lock_init(&base->lock);
++ raw_spin_lock_init(&base->lock);
+ base->clk = jiffies;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ init_swait_queue_head(&base->wait_for_running_timer);
++#endif
+ }
+ }
+
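On RT, del_timer_sync() can no longer spin with cpu_relax() while the callback runs: the softirq thread executing the callback is itself preemptible, so a higher-priority caller could spin forever and starve it. The wait_for_running_timer()/wakeup_timer_waiters() pair above solves this with a simple-wait queue. The core pattern, reduced to a self-contained sketch with assumed names:

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
static void *demo_running;		/* callback currently executing */

static void demo_sync_delete(void *timer)
{
	/* sleep until 'timer' is no longer the running callback */
	swait_event(demo_wq, demo_running != timer);
}

static void demo_callback_finished(void)
{
	demo_running = NULL;
	swake_up_all(&demo_wq);		/* release any sync deleters */
}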
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index f4b86e8ca1e7..340f14eef24a 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -187,6 +187,24 @@ config IRQSOFF_TRACER
+ enabled. This option and the preempt-off timing option can be
+ used together or separately.)
+
++config INTERRUPT_OFF_HIST
++ bool "Interrupts-off Latency Histogram"
++ depends on IRQSOFF_TRACER
++ help
++ This option generates continuously updated histograms (one per cpu)
++ of the duration of time periods with interrupts disabled. The
++ histograms are disabled by default. To enable them, write a non-zero
++ number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
++
++ If PREEMPT_OFF_HIST is also selected, additional histograms (one
++ per cpu) are generated that accumulate the duration of time periods
++ when both interrupts and preemption are disabled. The histogram data
++ will be located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/irqsoff
++
+ config PREEMPT_TRACER
+ bool "Preemption-off Latency Tracer"
+ default n
+@@ -197,6 +215,7 @@ config PREEMPT_TRACER
+ select RING_BUFFER_ALLOW_SWAP
+ select TRACER_SNAPSHOT
+ select TRACER_SNAPSHOT_PER_CPU_SWAP
++ select USING_GET_LOCK_PARENT_IP
+ help
+ This option measures the time spent in preemption-off critical
+ sections, with microsecond accuracy.
+@@ -211,6 +230,24 @@ config PREEMPT_TRACER
+ enabled. This option and the irqs-off timing option can be
+ used together or separately.)
+
++config PREEMPT_OFF_HIST
++ bool "Preemption-off Latency Histogram"
++ depends on PREEMPT_TRACER
++ help
++ This option generates continuously updated histograms (one per cpu)
++ of the duration of time periods with preemption disabled. The
++ histograms are disabled by default. To enable them, write a non-zero
++ number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
++
++ If INTERRUPT_OFF_HIST is also selected, additional histograms (one
++ per cpu) are generated that accumulate the duration of time periods
++ when both interrupts and preemption are disabled. The histogram data
++ will be located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/preemptoff
++
+ config SCHED_TRACER
+ bool "Scheduling Latency Tracer"
+ select GENERIC_TRACER
+@@ -221,6 +258,74 @@ config SCHED_TRACER
+ This tracer tracks the latency of the highest priority task
+ to be scheduled in, starting from the point it has woken up.
+
++config WAKEUP_LATENCY_HIST
++ bool "Scheduling Latency Histogram"
++ depends on SCHED_TRACER
++ help
++ This option generates continuously updated histograms (one per cpu)
++ of the scheduling latency of the highest priority task.
++ The histograms are disabled by default. To enable them, write a
++ non-zero number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/wakeup
++
++ Two different algorithms are used, one to determine the latency of
++ processes that exclusively use the highest priority of the system and
++ another one to determine the latency of processes that share the
++ highest system priority with other processes. The former is used to
++ improve hardware and system software, the latter to optimize the
++ priority design of a given system. The histogram data will be
++ located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/wakeup
++
++ and
++
++ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
++
++	  If both Scheduling Latency Histogram and Missed Timer Offsets
++	  Histogram are selected, additional histogram data will be collected
++	  that contains, in addition to the wakeup latency, the timer latency
++	  whenever the wakeup was triggered by an expired timer. These
++	  histograms are available in the
++
++	  /sys/kernel/debug/tracing/latency_hist/timerandwakeup
++
++	  directory. They reflect the apparent interrupt and scheduling latency
++	  and are best suited to determining the worst-case latency of a given
++	  system. To enable these histograms, write a non-zero number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
++
++config MISSED_TIMER_OFFSETS_HIST
++ depends on HIGH_RES_TIMERS
++ select GENERIC_TRACER
++ bool "Missed Timer Offsets Histogram"
++ help
++	  This option generates per-cpu histograms of missed timer offsets
++	  in microseconds. The histograms are disabled by default. To enable
++	  them, write a non-zero number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
++
++ The histogram data will be located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
++
++	  If both Scheduling Latency Histogram and Missed Timer Offsets
++	  Histogram are selected, additional histogram data will be collected
++	  that contains, in addition to the wakeup latency, the timer latency
++	  whenever the wakeup was triggered by an expired timer. These
++	  histograms are available in the
++
++	  /sys/kernel/debug/tracing/latency_hist/timerandwakeup
++
++	  directory. They reflect the apparent interrupt and scheduling latency
++	  and are best suited to determining the worst-case latency of a given
++	  system. To enable these histograms, write a non-zero number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
++
+ config ENABLE_DEFAULT_TRACERS
+ bool "Trace process context switches and events"
+ depends on !GENERIC_TRACER
+diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
+index d0a1617b52b4..6bf9e9ff1fa5 100644
+--- a/kernel/trace/Makefile
++++ b/kernel/trace/Makefile
+@@ -41,6 +41,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
+ obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
+ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
++obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
++obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
++obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
++obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
+ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
+ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
+ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
+diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
+new file mode 100644
+index 000000000000..7f6ee70dea41
+--- /dev/null
++++ b/kernel/trace/latency_hist.c
+@@ -0,0 +1,1178 @@
++/*
++ * kernel/trace/latency_hist.c
++ *
++ * Add support for histograms of preemption-off latency,
++ * interrupt-off latency and wakeup latency; it depends on
++ * Real-Time Preemption support.
++ *
++ * Copyright (C) 2005 MontaVista Software, Inc.
++ * Yi Yang <yyang@ch.mvista.com>
++ *
++ * Converted to work with the new latency tracer.
++ * Copyright (C) 2008 Red Hat, Inc.
++ * Steven Rostedt <srostedt@redhat.com>
++ *
++ */
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/percpu.h>
++#include <linux/kallsyms.h>
++#include <linux/uaccess.h>
++#include <linux/sched.h>
++#include <linux/sched/rt.h>
++#include <linux/slab.h>
++#include <linux/atomic.h>
++#include <asm/div64.h>
++
++#include "trace.h"
++#include <trace/events/sched.h>
++
++#define NSECS_PER_USECS 1000L
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/hist.h>
++
++enum {
++ IRQSOFF_LATENCY = 0,
++ PREEMPTOFF_LATENCY,
++ PREEMPTIRQSOFF_LATENCY,
++ WAKEUP_LATENCY,
++ WAKEUP_LATENCY_SHAREDPRIO,
++ MISSED_TIMER_OFFSETS,
++ TIMERANDWAKEUP_LATENCY,
++ MAX_LATENCY_TYPE,
++};
++
++#define MAX_ENTRY_NUM 10240
++
++struct hist_data {
++	atomic_t hist_mode; /* 0 don't log, 1 log */
++ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
++ long min_lat;
++ long max_lat;
++ unsigned long long below_hist_bound_samples;
++ unsigned long long above_hist_bound_samples;
++ long long accumulate_lat;
++ unsigned long long total_samples;
++ unsigned long long hist_array[MAX_ENTRY_NUM];
++};
++
++struct enable_data {
++ int latency_type;
++ int enabled;
++};
++
++static char *latency_hist_dir_root = "latency_hist";
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
++static char *irqsoff_hist_dir = "irqsoff";
++static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
++static DEFINE_PER_CPU(int, hist_irqsoff_counting);
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
++static char *preemptoff_hist_dir = "preemptoff";
++static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
++static DEFINE_PER_CPU(int, hist_preemptoff_counting);
++#endif
++
++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
++static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
++static char *preemptirqsoff_hist_dir = "preemptirqsoff";
++static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
++static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
++#endif
++
++#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
++static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
++static struct enable_data preemptirqsoff_enabled_data = {
++ .latency_type = PREEMPTIRQSOFF_LATENCY,
++ .enabled = 0,
++};
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++struct maxlatproc_data {
++ char comm[FIELD_SIZEOF(struct task_struct, comm)];
++ char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
++ int pid;
++ int current_pid;
++ int prio;
++ int current_prio;
++ long latency;
++ long timeroffset;
++ cycle_t timestamp;
++};
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
++static char *wakeup_latency_hist_dir = "wakeup";
++static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
++static notrace void probe_wakeup_latency_hist_start(void *v,
++ struct task_struct *p);
++static notrace void probe_wakeup_latency_hist_stop(void *v,
++ bool preempt, struct task_struct *prev, struct task_struct *next);
++static notrace void probe_sched_migrate_task(void *,
++ struct task_struct *task, int cpu);
++static struct enable_data wakeup_latency_enabled_data = {
++ .latency_type = WAKEUP_LATENCY,
++ .enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
++static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
++static DEFINE_PER_CPU(int, wakeup_sharedprio);
++static unsigned long wakeup_pid;
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
++static char *missed_timer_offsets_dir = "missed_timer_offsets";
++static notrace void probe_hrtimer_interrupt(void *v, int cpu,
++ long long offset, struct task_struct *curr, struct task_struct *task);
++static struct enable_data missed_timer_offsets_enabled_data = {
++ .latency_type = MISSED_TIMER_OFFSETS,
++ .enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
++static unsigned long missed_timer_offsets_pid;
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
++static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
++static struct enable_data timerandwakeup_enabled_data = {
++ .latency_type = TIMERANDWAKEUP_LATENCY,
++ .enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
++#endif
++
++void notrace latency_hist(int latency_type, int cpu, long latency,
++ long timeroffset, cycle_t stop,
++ struct task_struct *p)
++{
++ struct hist_data *my_hist;
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ struct maxlatproc_data *mp = NULL;
++#endif
++
++ if (!cpu_possible(cpu) || latency_type < 0 ||
++ latency_type >= MAX_LATENCY_TYPE)
++ return;
++
++ switch (latency_type) {
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ case IRQSOFF_LATENCY:
++ my_hist = &per_cpu(irqsoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ case PREEMPTOFF_LATENCY:
++ my_hist = &per_cpu(preemptoff_hist, cpu);
++ break;
++#endif
++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ my_hist = &per_cpu(preemptirqsoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ my_hist = &per_cpu(wakeup_latency_hist, cpu);
++ mp = &per_cpu(wakeup_maxlatproc, cpu);
++ break;
++ case WAKEUP_LATENCY_SHAREDPRIO:
++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ my_hist = &per_cpu(missed_timer_offsets, cpu);
++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
++ break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ case TIMERANDWAKEUP_LATENCY:
++ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
++ break;
++#endif
++
++ default:
++ return;
++ }
++
++ latency += my_hist->offset;
++
++ if (atomic_read(&my_hist->hist_mode) == 0)
++ return;
++
++ if (latency < 0 || latency >= MAX_ENTRY_NUM) {
++ if (latency < 0)
++ my_hist->below_hist_bound_samples++;
++ else
++ my_hist->above_hist_bound_samples++;
++ } else
++ my_hist->hist_array[latency]++;
++
++ if (unlikely(latency > my_hist->max_lat ||
++ my_hist->min_lat == LONG_MAX)) {
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ if (latency_type == WAKEUP_LATENCY ||
++ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
++ latency_type == MISSED_TIMER_OFFSETS ||
++ latency_type == TIMERANDWAKEUP_LATENCY) {
++ strncpy(mp->comm, p->comm, sizeof(mp->comm));
++ strncpy(mp->current_comm, current->comm,
++ sizeof(mp->current_comm));
++ mp->pid = task_pid_nr(p);
++ mp->current_pid = task_pid_nr(current);
++ mp->prio = p->prio;
++ mp->current_prio = current->prio;
++ mp->latency = latency;
++ mp->timeroffset = timeroffset;
++ mp->timestamp = stop;
++ }
++#endif
++ my_hist->max_lat = latency;
++ }
++ if (unlikely(latency < my_hist->min_lat))
++ my_hist->min_lat = latency;
++ my_hist->total_samples++;
++ my_hist->accumulate_lat += latency;
++}
++
++static void *l_start(struct seq_file *m, loff_t *pos)
++{
++ loff_t *index_ptr = NULL;
++ loff_t index = *pos;
++ struct hist_data *my_hist = m->private;
++
++ if (index == 0) {
++ char minstr[32], avgstr[32], maxstr[32];
++
++ atomic_dec(&my_hist->hist_mode);
++
++ if (likely(my_hist->total_samples)) {
++ long avg = (long) div64_s64(my_hist->accumulate_lat,
++ my_hist->total_samples);
++ snprintf(minstr, sizeof(minstr), "%ld",
++ my_hist->min_lat - my_hist->offset);
++ snprintf(avgstr, sizeof(avgstr), "%ld",
++ avg - my_hist->offset);
++ snprintf(maxstr, sizeof(maxstr), "%ld",
++ my_hist->max_lat - my_hist->offset);
++ } else {
++ strcpy(minstr, "<undef>");
++ strcpy(avgstr, minstr);
++ strcpy(maxstr, minstr);
++ }
++
++ seq_printf(m, "#Minimum latency: %s microseconds\n"
++ "#Average latency: %s microseconds\n"
++ "#Maximum latency: %s microseconds\n"
++ "#Total samples: %llu\n"
++ "#There are %llu samples lower than %ld"
++ " microseconds.\n"
++			   "#There are %llu samples greater than or"
++			   " equal to %ld microseconds.\n"
++ "#usecs\t%16s\n",
++ minstr, avgstr, maxstr,
++ my_hist->total_samples,
++ my_hist->below_hist_bound_samples,
++ -my_hist->offset,
++ my_hist->above_hist_bound_samples,
++ MAX_ENTRY_NUM - my_hist->offset,
++ "samples");
++ }
++ if (index < MAX_ENTRY_NUM) {
++ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
++ if (index_ptr)
++ *index_ptr = index;
++ }
++
++ return index_ptr;
++}
++
++static void *l_next(struct seq_file *m, void *p, loff_t *pos)
++{
++ loff_t *index_ptr = p;
++ struct hist_data *my_hist = m->private;
++
++ if (++*pos >= MAX_ENTRY_NUM) {
++ atomic_inc(&my_hist->hist_mode);
++ return NULL;
++ }
++ *index_ptr = *pos;
++ return index_ptr;
++}
++
++static void l_stop(struct seq_file *m, void *p)
++{
++ kfree(p);
++}
++
++static int l_show(struct seq_file *m, void *p)
++{
++ int index = *(loff_t *) p;
++ struct hist_data *my_hist = m->private;
++
++ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
++ my_hist->hist_array[index]);
++ return 0;
++}
++
++static const struct seq_operations latency_hist_seq_op = {
++ .start = l_start,
++ .next = l_next,
++ .stop = l_stop,
++ .show = l_show
++};
++
++static int latency_hist_open(struct inode *inode, struct file *file)
++{
++ int ret;
++
++ ret = seq_open(file, &latency_hist_seq_op);
++ if (!ret) {
++ struct seq_file *seq = file->private_data;
++ seq->private = inode->i_private;
++ }
++ return ret;
++}
++
++static const struct file_operations latency_hist_fops = {
++ .open = latency_hist_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static void clear_maxlatprocdata(struct maxlatproc_data *mp)
++{
++ mp->comm[0] = mp->current_comm[0] = '\0';
++ mp->prio = mp->current_prio = mp->pid = mp->current_pid =
++ mp->latency = mp->timeroffset = -1;
++ mp->timestamp = 0;
++}
++#endif
++
++static void hist_reset(struct hist_data *hist)
++{
++ atomic_dec(&hist->hist_mode);
++
++ memset(hist->hist_array, 0, sizeof(hist->hist_array));
++ hist->below_hist_bound_samples = 0ULL;
++ hist->above_hist_bound_samples = 0ULL;
++ hist->min_lat = LONG_MAX;
++ hist->max_lat = LONG_MIN;
++ hist->total_samples = 0ULL;
++ hist->accumulate_lat = 0LL;
++
++ atomic_inc(&hist->hist_mode);
++}
++
++static ssize_t
++latency_hist_reset(struct file *file, const char __user *a,
++ size_t size, loff_t *off)
++{
++ int cpu;
++ struct hist_data *hist = NULL;
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ struct maxlatproc_data *mp = NULL;
++#endif
++ off_t latency_type = (off_t) file->private_data;
++
++ for_each_online_cpu(cpu) {
++
++ switch (latency_type) {
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ case PREEMPTOFF_LATENCY:
++ hist = &per_cpu(preemptoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ case IRQSOFF_LATENCY:
++ hist = &per_cpu(irqsoff_hist, cpu);
++ break;
++#endif
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ hist = &per_cpu(preemptirqsoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ hist = &per_cpu(wakeup_latency_hist, cpu);
++ mp = &per_cpu(wakeup_maxlatproc, cpu);
++ break;
++ case WAKEUP_LATENCY_SHAREDPRIO:
++ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ hist = &per_cpu(missed_timer_offsets, cpu);
++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
++ break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ case TIMERANDWAKEUP_LATENCY:
++ hist = &per_cpu(timerandwakeup_latency_hist, cpu);
++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
++ break;
++#endif
++ }
++
++ hist_reset(hist);
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ if (latency_type == WAKEUP_LATENCY ||
++ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
++ latency_type == MISSED_TIMER_OFFSETS ||
++ latency_type == TIMERANDWAKEUP_LATENCY)
++ clear_maxlatprocdata(mp);
++#endif
++ }
++
++ return size;
++}
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static ssize_t
++show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ int r;
++ unsigned long *this_pid = file->private_data;
++
++ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++}
++
++static ssize_t do_pid(struct file *file, const char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ unsigned long pid;
++ unsigned long *this_pid = file->private_data;
++
++ if (cnt >= sizeof(buf))
++ return -EINVAL;
++
++ if (copy_from_user(&buf, ubuf, cnt))
++ return -EFAULT;
++
++ buf[cnt] = '\0';
++
++ if (kstrtoul(buf, 10, &pid))
++ return -EINVAL;
++
++ *this_pid = pid;
++
++ return cnt;
++}
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static ssize_t
++show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ int r;
++ struct maxlatproc_data *mp = file->private_data;
++ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
++ unsigned long long t;
++ unsigned long usecs, secs;
++ char *buf;
++
++ if (mp->pid == -1 || mp->current_pid == -1) {
++ buf = "(none)\n";
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf,
++ strlen(buf));
++ }
++
++ buf = kmalloc(strmaxlen, GFP_KERNEL);
++ if (buf == NULL)
++ return -ENOMEM;
++
++ t = ns2usecs(mp->timestamp);
++ usecs = do_div(t, USEC_PER_SEC);
++ secs = (unsigned long) t;
++ r = snprintf(buf, strmaxlen,
++ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
++ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
++ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
++ secs, usecs);
++ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++ kfree(buf);
++ return r;
++}
++#endif
++
++static ssize_t
++show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ struct enable_data *ed = file->private_data;
++ int r;
++
++ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++}
++
++static ssize_t
++do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ long enable;
++ struct enable_data *ed = file->private_data;
++
++ if (cnt >= sizeof(buf))
++ return -EINVAL;
++
++ if (copy_from_user(&buf, ubuf, cnt))
++ return -EFAULT;
++
++ buf[cnt] = 0;
++
++ if (kstrtoul(buf, 10, &enable))
++ return -EINVAL;
++
++ if ((enable && ed->enabled) || (!enable && !ed->enabled))
++ return cnt;
++
++ if (enable) {
++ int ret;
++
++ switch (ed->latency_type) {
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ ret = register_trace_preemptirqsoff_hist(
++ probe_preemptirqsoff_hist, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_preemptirqsoff_hist "
++ "to trace_preemptirqsoff_hist\n");
++ return ret;
++ }
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ ret = register_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_wakeup_latency_hist_start "
++ "to trace_sched_wakeup\n");
++ return ret;
++ }
++ ret = register_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_wakeup_latency_hist_start "
++ "to trace_sched_wakeup_new\n");
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ return ret;
++ }
++ ret = register_trace_sched_switch(
++ probe_wakeup_latency_hist_stop, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_wakeup_latency_hist_stop "
++ "to trace_sched_switch\n");
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ return ret;
++ }
++ ret = register_trace_sched_migrate_task(
++ probe_sched_migrate_task, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_sched_migrate_task "
++ "to trace_sched_migrate_task\n");
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_switch(
++ probe_wakeup_latency_hist_stop, NULL);
++ return ret;
++ }
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ ret = register_trace_hrtimer_interrupt(
++ probe_hrtimer_interrupt, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_hrtimer_interrupt "
++ "to trace_hrtimer_interrupt\n");
++ return ret;
++ }
++ break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ case TIMERANDWAKEUP_LATENCY:
++ if (!wakeup_latency_enabled_data.enabled ||
++ !missed_timer_offsets_enabled_data.enabled)
++ return -EINVAL;
++ break;
++#endif
++ default:
++ break;
++ }
++ } else {
++ switch (ed->latency_type) {
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ {
++ int cpu;
++
++ unregister_trace_preemptirqsoff_hist(
++ probe_preemptirqsoff_hist, NULL);
++ for_each_online_cpu(cpu) {
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ per_cpu(hist_irqsoff_counting,
++ cpu) = 0;
++#endif
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ per_cpu(hist_preemptoff_counting,
++ cpu) = 0;
++#endif
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ per_cpu(hist_preemptirqsoff_counting,
++ cpu) = 0;
++#endif
++ }
++ }
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ {
++ int cpu;
++
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_switch(
++ probe_wakeup_latency_hist_stop, NULL);
++ unregister_trace_sched_migrate_task(
++ probe_sched_migrate_task, NULL);
++
++ for_each_online_cpu(cpu) {
++ per_cpu(wakeup_task, cpu) = NULL;
++ per_cpu(wakeup_sharedprio, cpu) = 0;
++ }
++ }
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ timerandwakeup_enabled_data.enabled = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ unregister_trace_hrtimer_interrupt(
++ probe_hrtimer_interrupt, NULL);
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ timerandwakeup_enabled_data.enabled = 0;
++#endif
++ break;
++#endif
++ default:
++ break;
++ }
++ }
++ ed->enabled = enable;
++ return cnt;
++}
++
++static const struct file_operations latency_hist_reset_fops = {
++ .open = tracing_open_generic,
++ .write = latency_hist_reset,
++};
++
++static const struct file_operations enable_fops = {
++ .open = tracing_open_generic,
++ .read = show_enable,
++ .write = do_enable,
++};
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static const struct file_operations pid_fops = {
++ .open = tracing_open_generic,
++ .read = show_pid,
++ .write = do_pid,
++};
++
++static const struct file_operations maxlatproc_fops = {
++ .open = tracing_open_generic,
++ .read = show_maxlatproc,
++};
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++static notrace void probe_preemptirqsoff_hist(void *v, int reason,
++ int starthist)
++{
++ int cpu = raw_smp_processor_id();
++ int time_set = 0;
++
++ if (starthist) {
++ cycle_t uninitialized_var(start);
++
++ if (!preempt_count() && !irqs_disabled())
++ return;
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ if ((reason == IRQS_OFF || reason == TRACE_START) &&
++ !per_cpu(hist_irqsoff_counting, cpu)) {
++ per_cpu(hist_irqsoff_counting, cpu) = 1;
++ start = ftrace_now(cpu);
++ time_set++;
++ per_cpu(hist_irqsoff_start, cpu) = start;
++ }
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
++ !per_cpu(hist_preemptoff_counting, cpu)) {
++ per_cpu(hist_preemptoff_counting, cpu) = 1;
++ if (!(time_set++))
++ start = ftrace_now(cpu);
++ per_cpu(hist_preemptoff_start, cpu) = start;
++ }
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ if (per_cpu(hist_irqsoff_counting, cpu) &&
++ per_cpu(hist_preemptoff_counting, cpu) &&
++ !per_cpu(hist_preemptirqsoff_counting, cpu)) {
++ per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
++ if (!time_set)
++ start = ftrace_now(cpu);
++ per_cpu(hist_preemptirqsoff_start, cpu) = start;
++ }
++#endif
++ } else {
++ cycle_t uninitialized_var(stop);
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ if ((reason == IRQS_ON || reason == TRACE_STOP) &&
++ per_cpu(hist_irqsoff_counting, cpu)) {
++ cycle_t start = per_cpu(hist_irqsoff_start, cpu);
++
++ stop = ftrace_now(cpu);
++ time_set++;
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
++
++ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
++ stop, NULL);
++ }
++ per_cpu(hist_irqsoff_counting, cpu) = 0;
++ }
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
++ per_cpu(hist_preemptoff_counting, cpu)) {
++ cycle_t start = per_cpu(hist_preemptoff_start, cpu);
++
++ if (!(time_set++))
++ stop = ftrace_now(cpu);
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
++
++ latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
++ 0, stop, NULL);
++ }
++ per_cpu(hist_preemptoff_counting, cpu) = 0;
++ }
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ if ((!per_cpu(hist_irqsoff_counting, cpu) ||
++ !per_cpu(hist_preemptoff_counting, cpu)) &&
++ per_cpu(hist_preemptirqsoff_counting, cpu)) {
++ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
++
++ if (!time_set)
++ stop = ftrace_now(cpu);
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
++
++ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
++ latency, 0, stop, NULL);
++ }
++ per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
++ }
++#endif
++ }
++}
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++static DEFINE_RAW_SPINLOCK(wakeup_lock);
++static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
++ int cpu)
++{
++ int old_cpu = task_cpu(task);
++
++ if (cpu != old_cpu) {
++ unsigned long flags;
++ struct task_struct *cpu_wakeup_task;
++
++ raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
++ if (task == cpu_wakeup_task) {
++ put_task_struct(cpu_wakeup_task);
++ per_cpu(wakeup_task, old_cpu) = NULL;
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
++ get_task_struct(cpu_wakeup_task);
++ }
++
++ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++ }
++}
++
++static notrace void probe_wakeup_latency_hist_start(void *v,
++ struct task_struct *p)
++{
++ unsigned long flags;
++ struct task_struct *curr = current;
++ int cpu = task_cpu(p);
++ struct task_struct *cpu_wakeup_task;
++
++ raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
++
++ if (wakeup_pid) {
++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
++ p->prio == curr->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++ if (likely(wakeup_pid != task_pid_nr(p)))
++ goto out;
++ } else {
++ if (likely(!rt_task(p)) ||
++ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
++ p->prio > curr->prio)
++ goto out;
++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
++ p->prio == curr->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++ }
++
++ if (cpu_wakeup_task)
++ put_task_struct(cpu_wakeup_task);
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
++ get_task_struct(cpu_wakeup_task);
++ cpu_wakeup_task->preempt_timestamp_hist =
++ ftrace_now(raw_smp_processor_id());
++out:
++ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++}
++
++static notrace void probe_wakeup_latency_hist_stop(void *v,
++ bool preempt, struct task_struct *prev, struct task_struct *next)
++{
++ unsigned long flags;
++ int cpu = task_cpu(next);
++ long latency;
++ cycle_t stop;
++ struct task_struct *cpu_wakeup_task;
++
++ raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
++
++ if (cpu_wakeup_task == NULL)
++ goto out;
++
++ /* Already running? */
++ if (unlikely(current == cpu_wakeup_task))
++ goto out_reset;
++
++ if (next != cpu_wakeup_task) {
++ if (next->prio < cpu_wakeup_task->prio)
++ goto out_reset;
++
++ if (next->prio == cpu_wakeup_task->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++
++ goto out;
++ }
++
++ if (current->prio == cpu_wakeup_task->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++
++	/*
++	 * The task we are waiting for is about to be switched to.
++	 * Calculate the latency and store it in the histogram.
++	 */
++ stop = ftrace_now(raw_smp_processor_id());
++
++ latency = ((long) (stop - next->preempt_timestamp_hist)) /
++ NSECS_PER_USECS;
++
++ if (per_cpu(wakeup_sharedprio, cpu)) {
++ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
++ next);
++ per_cpu(wakeup_sharedprio, cpu) = 0;
++ } else {
++ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ if (timerandwakeup_enabled_data.enabled) {
++ latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
++ next->timer_offset + latency, next->timer_offset,
++ stop, next);
++ }
++#endif
++ }
++
++out_reset:
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ next->timer_offset = 0;
++#endif
++ put_task_struct(cpu_wakeup_task);
++ per_cpu(wakeup_task, cpu) = NULL;
++out:
++ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++}
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++static notrace void probe_hrtimer_interrupt(void *v, int cpu,
++ long long latency_ns, struct task_struct *curr,
++ struct task_struct *task)
++{
++ if (latency_ns <= 0 && task != NULL && rt_task(task) &&
++ (task->prio < curr->prio ||
++ (task->prio == curr->prio &&
++ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
++ long latency;
++ cycle_t now;
++
++ if (missed_timer_offsets_pid) {
++ if (likely(missed_timer_offsets_pid !=
++ task_pid_nr(task)))
++ return;
++ }
++
++ now = ftrace_now(cpu);
++ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
++ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
++ task);
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ task->timer_offset = latency;
++#endif
++ }
++}
++#endif
++
++static __init int latency_hist_init(void)
++{
++ struct dentry *latency_hist_root = NULL;
++ struct dentry *dentry;
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ struct dentry *dentry_sharedprio;
++#endif
++ struct dentry *entry;
++ struct dentry *enable_root;
++ int i = 0;
++ struct hist_data *my_hist;
++ char name[64];
++ char *cpufmt = "CPU%d";
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ char *cpufmt_maxlatproc = "max_latency-CPU%d";
++ struct maxlatproc_data *mp = NULL;
++#endif
++
++ dentry = tracing_init_dentry();
++ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
++ enable_root = debugfs_create_dir("enable", latency_hist_root);
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(irqsoff_hist, i), &latency_hist_fops);
++ my_hist = &per_cpu(irqsoff_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ dentry = debugfs_create_dir(preemptoff_hist_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(preemptoff_hist, i), &latency_hist_fops);
++ my_hist = &per_cpu(preemptoff_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
++ my_hist = &per_cpu(preemptirqsoff_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++ entry = debugfs_create_file("preemptirqsoff", 0644,
++ enable_root, (void *)&preemptirqsoff_enabled_data,
++ &enable_fops);
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ dentry = debugfs_create_dir(wakeup_latency_hist_dir,
++ latency_hist_root);
++ dentry_sharedprio = debugfs_create_dir(
++ wakeup_latency_hist_dir_sharedprio, dentry);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(wakeup_latency_hist, i),
++ &latency_hist_fops);
++ my_hist = &per_cpu(wakeup_latency_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ entry = debugfs_create_file(name, 0444, dentry_sharedprio,
++ &per_cpu(wakeup_latency_hist_sharedprio, i),
++ &latency_hist_fops);
++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ sprintf(name, cpufmt_maxlatproc, i);
++
++ mp = &per_cpu(wakeup_maxlatproc, i);
++ entry = debugfs_create_file(name, 0444, dentry, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++
++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++ }
++ entry = debugfs_create_file("pid", 0644, dentry,
++ (void *)&wakeup_pid, &pid_fops);
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
++ entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
++ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
++ entry = debugfs_create_file("wakeup", 0644,
++ enable_root, (void *)&wakeup_latency_enabled_data,
++ &enable_fops);
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ dentry = debugfs_create_dir(missed_timer_offsets_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
++ my_hist = &per_cpu(missed_timer_offsets, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ sprintf(name, cpufmt_maxlatproc, i);
++ mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
++ entry = debugfs_create_file(name, 0444, dentry, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++ }
++ entry = debugfs_create_file("pid", 0644, dentry,
++ (void *)&missed_timer_offsets_pid, &pid_fops);
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
++ entry = debugfs_create_file("missed_timer_offsets", 0644,
++ enable_root, (void *)&missed_timer_offsets_enabled_data,
++ &enable_fops);
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(timerandwakeup_latency_hist, i),
++ &latency_hist_fops);
++ my_hist = &per_cpu(timerandwakeup_latency_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ sprintf(name, cpufmt_maxlatproc, i);
++ mp = &per_cpu(timerandwakeup_maxlatproc, i);
++ entry = debugfs_create_file(name, 0444, dentry, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
++ entry = debugfs_create_file("timerandwakeup", 0644,
++ enable_root, (void *)&timerandwakeup_enabled_data,
++ &enable_fops);
++#endif
++ return 0;
++}
++
++device_initcall(latency_hist_init);
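latency_hist.c instantiates its tracepoints via CREATE_TRACE_POINTS and <trace/events/hist.h>, a header this patch adds elsewhere; that header also declares the hrtimer_interrupt hook and defines the IRQS_ON/IRQS_OFF/PREEMPT_ON/PREEMPT_OFF/TRACE_START/TRACE_STOP reason codes used by the probes. A minimal sketch of the shape such a header would have for the preemptirqsoff hook, inferred from the call sites rather than quoted from the patch:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM hist

#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HIST_H

#include <linux/tracepoint.h>

TRACE_EVENT(preemptirqsoff_hist,

	TP_PROTO(int reason, int starthist),

	TP_ARGS(reason, starthist),

	TP_STRUCT__entry(
		__field(int, reason)
		__field(int, starthist)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		__entry->starthist	= starthist;
	),

	TP_printk("reason=%d start=%d", __entry->reason, __entry->starthist)
);

#endif /* _TRACE_HIST_H */

/* This part must be outside protection */
#include <trace/define_trace.h>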
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 7bc56762ca35..84ffcb813263 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
+ struct task_struct *tsk = current;
+
+ entry->preempt_count = pc & 0xff;
++ entry->preempt_lazy_count = preempt_lazy_count();
+ entry->pid = (tsk) ? tsk->pid : 0;
+ entry->flags =
+ #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+@@ -1907,8 +1908,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
+ ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
+ ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+ ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
++ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
++ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
+ (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
++
++ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+ }
+ EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
+
+@@ -2892,14 +2896,17 @@ get_total_entries(struct trace_buffer *buf,
+
+ static void print_lat_help_header(struct seq_file *m)
+ {
+- seq_puts(m, "# _------=> CPU# \n"
+- "# / _-----=> irqs-off \n"
+- "# | / _----=> need-resched \n"
+- "# || / _---=> hardirq/softirq \n"
+- "# ||| / _--=> preempt-depth \n"
+- "# |||| / delay \n"
+- "# cmd pid ||||| time | caller \n"
+- "# \\ / ||||| \\ | / \n");
++ seq_puts(m, "# _--------=> CPU# \n"
++ "# / _-------=> irqs-off \n"
++ "# | / _------=> need-resched \n"
++ "# || / _-----=> need-resched_lazy \n"
++ "# ||| / _----=> hardirq/softirq \n"
++ "# |||| / _---=> preempt-depth \n"
++ "# ||||| / _--=> preempt-lazy-depth\n"
++ "# |||||| / _-=> migrate-disable \n"
++ "# ||||||| / delay \n"
++ "# cmd pid |||||||| time | caller \n"
++ "# \\ / |||||||| \\ | / \n");
+ }
+
+ static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
+@@ -2925,11 +2932,14 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
+ print_event_info(buf, m);
+ seq_puts(m, "# _-----=> irqs-off\n"
+ "# / _----=> need-resched\n"
+- "# | / _---=> hardirq/softirq\n"
+- "# || / _--=> preempt-depth\n"
+- "# ||| / delay\n"
+- "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
+- "# | | | |||| | |\n");
++ "# |/ _-----=> need-resched_lazy\n"
++ "# || / _---=> hardirq/softirq\n"
++ "# ||| / _--=> preempt-depth\n"
++ "# |||| / _-=> preempt-lazy-depth\n"
++ "# ||||| / _-=> migrate-disable \n"
++ "# |||||| / delay\n"
++ "# TASK-PID CPU# ||||||| TIMESTAMP FUNCTION\n"
++ "# | | | ||||||| | |\n");
+ }
+
+ void
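tracing_generic_entry_update() above leans on accessors from the lazy-preempt core added earlier in this patch: preempt_lazy_count(), tif_need_resched_now(), need_resched_lazy() and __migrate_disabled(). Under CONFIG_PREEMPT_LAZY they plausibly reduce to something like the following; treat the exact definitions as an assumption, not a quotation:

#ifdef CONFIG_PREEMPT_LAZY
# define preempt_lazy_count()	(current_thread_info()->preempt_lazy_count)
# define tif_need_resched_now()	test_thread_flag(TIF_NEED_RESCHED)
# define need_resched_lazy()	test_thread_flag(TIF_NEED_RESCHED_LAZY)
#else
# define preempt_lazy_count()	(0)
# define tif_need_resched_now()	tif_need_resched()
# define need_resched_lazy()	(0)
#endif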
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index f783df416726..6f2d0fa4fbf1 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -123,6 +123,7 @@ struct kretprobe_trace_entry_head {
+ * NEED_RESCHED - reschedule is requested
+ * HARDIRQ - inside an interrupt handler
+ * SOFTIRQ - inside a softirq handler
++ * NEED_RESCHED_LAZY - lazy reschedule is requested
+ */
+ enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+@@ -132,6 +133,7 @@ enum trace_flag_type {
+ TRACE_FLAG_SOFTIRQ = 0x10,
+ TRACE_FLAG_PREEMPT_RESCHED = 0x20,
+ TRACE_FLAG_NMI = 0x40,
++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
+ };
+
+ #define TRACE_BUF_SIZE 1024
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 03c0a48c3ac4..0b85d516b491 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -187,6 +187,8 @@ static int trace_define_common_fields(void)
+ __common_field(unsigned char, flags);
+ __common_field(unsigned char, preempt_count);
+ __common_field(int, pid);
++ __common_field(unsigned short, migrate_disable);
++ __common_field(unsigned short, padding);
+
+ return ret;
+ }
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 03cdff84d026..940bd10b4406 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -13,6 +13,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/module.h>
+ #include <linux/ftrace.h>
++#include <trace/events/hist.h>
+
+ #include "trace.h"
+
+@@ -424,11 +425,13 @@ void start_critical_timings(void)
+ {
+ if (preempt_trace() || irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++ trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1);
+ }
+ EXPORT_SYMBOL_GPL(start_critical_timings);
+
+ void stop_critical_timings(void)
+ {
++ trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0);
+ if (preempt_trace() || irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
+ #ifdef CONFIG_PROVE_LOCKING
+ void time_hardirqs_on(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(a0, a1);
+ }
+@@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(a0, a1);
++ trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
+ }
+
+ #else /* !CONFIG_PROVE_LOCKING */
+@@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct task_struct *curr)
+ */
+ void trace_hardirqs_on(void)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off);
+
+ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(CALLER_ADDR0, caller_addr);
+ }
+@@ -494,6 +502,7 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(CALLER_ADDR0, caller_addr);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+@@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
+ #ifdef CONFIG_PREEMPT_TRACER
+ void trace_preempt_on(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist(PREEMPT_ON, 0);
+ if (preempt_trace() && !irq_trace())
+ stop_critical_timing(a0, a1);
+ }
+
+ void trace_preempt_off(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist(PREEMPT_ON, 1);
+ if (preempt_trace() && !irq_trace())
+ start_critical_timing(a0, a1);
+ }
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 0bb9cf2d53e6..455a7464772f 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
+ {
+ char hardsoft_irq;
+ char need_resched;
++ char need_resched_lazy;
+ char irqs_off;
+ int hardirq;
+ int softirq;
+@@ -416,6 +417,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
+ break;
+ }
+
++ need_resched_lazy =
++ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
++
+ hardsoft_irq =
+ (nmi && hardirq) ? 'Z' :
+ nmi ? 'z' :
+@@ -424,14 +428,25 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
+ softirq ? 's' :
+ '.' ;
+
+- trace_seq_printf(s, "%c%c%c",
+- irqs_off, need_resched, hardsoft_irq);
++ trace_seq_printf(s, "%c%c%c%c",
++ irqs_off, need_resched, need_resched_lazy,
++ hardsoft_irq);
+
+ if (entry->preempt_count)
+ trace_seq_printf(s, "%x", entry->preempt_count);
+ else
+ trace_seq_putc(s, '.');
+
++ if (entry->preempt_lazy_count)
++ trace_seq_printf(s, "%x", entry->preempt_lazy_count);
++ else
++ trace_seq_putc(s, '.');
++
++ if (entry->migrate_disable)
++ trace_seq_printf(s, "%x", entry->migrate_disable);
++ else
++ trace_seq_putc(s, '.');
++
+ return !trace_seq_has_overflowed(s);
+ }
+
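With the extra 'L' column the latency prefix grows from three to four flag characters, now followed by up to three count fields: preempt count, preempt-lazy count, and migrate-disable depth. A standalone sketch of the widened field; the trimmed-down struct entry is hypothetical:

#include <stdio.h>

/* hypothetical, trimmed-down trace entry: just what the format needs */
struct entry {
	unsigned char preempt_count;
	unsigned char preempt_lazy_count;
	unsigned short migrate_disable;
};

static void print_count(unsigned int v)
{
	if (v)
		printf("%x", v);
	else
		putchar('.');
}

int main(void)
{
	struct entry e = { .preempt_count = 1, .migrate_disable = 2 };

	printf("d.L.");			/* irqs-off, no resched, lazy resched, no irq */
	print_count(e.preempt_count);
	print_count(e.preempt_lazy_count);
	print_count(e.migrate_disable);
	putchar('\n');			/* -> "d.L.1.2" */
	return 0;
}
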
+diff --git a/kernel/user.c b/kernel/user.c
+index b069ccbfb0b0..1a2e88e98b5e 100644
+--- a/kernel/user.c
++++ b/kernel/user.c
+@@ -161,11 +161,11 @@ void free_uid(struct user_struct *up)
+ if (!up)
+ return;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+ free_user(up, flags);
+ else
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ struct user_struct *alloc_uid(kuid_t uid)
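
The _nort ("not on RT") helpers used here are a recurring idiom throughout this patch: genuine IRQ disabling on mainline, effectively nothing on RT, where the lock taken next is a sleeping lock and hard IRQ-off sections are exactly what the tree tries to avoid. A sketch of their likely shape, hedged because the exact definitions live in headers outside this excerpt:

#ifndef CONFIG_PREEMPT_RT_BASE
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#else
/* on RT only snapshot the flags word; interrupts stay enabled */
# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
#endif
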
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 9acb29f280ec..caba62080411 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -315,6 +315,8 @@ static int is_softlockup(unsigned long touch_ts)
+
+ #ifdef CONFIG_HARDLOCKUP_DETECTOR
+
++static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
++
+ static struct perf_event_attr wd_hw_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+@@ -349,6 +351,13 @@ static void watchdog_overflow_callback(struct perf_event *event,
+ /* only print hardlockups once */
+ if (__this_cpu_read(hard_watchdog_warn) == true)
+ return;
++ /*
++ * If early-printk is enabled then make sure we do not
++ * lock up in printk() and kill console logging:
++ */
++ printk_kill();
++
++ raw_spin_lock(&watchdog_output_lock);
+
+ pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ print_modules();
+@@ -366,6 +375,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
+ !test_and_set_bit(0, &hardlockup_allcpu_dumped))
+ trigger_allbutself_cpu_backtrace();
+
++ raw_spin_unlock(&watchdog_output_lock);
+ if (hardlockup_panic)
+ nmi_panic(regs, "Hard LOCKUP");
+
+@@ -513,6 +523,7 @@ static void watchdog_enable(unsigned int cpu)
+ /* kick off the timer for the hardlockup detector */
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = watchdog_timer_fn;
++ hrtimer->irqsafe = 1;
+
+ /* Enable the perf event */
+ watchdog_nmi_enable(cpu);
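
Two RT-specific moves here: printk_kill() (an RT helper assumed from elsewhere in this patch) keeps printk from wedging the machine while a lockup is reported, and a raw spinlock serializes the report itself. Raw spinlocks matter because on RT a plain spinlock_t becomes a sleeping lock, which must never be taken from the NMI-like perf callback. A minimal sketch of the pattern:

#include <linux/spinlock.h>
#include <linux/printk.h>

/* raw_spinlock_t keeps the classic spinning behaviour even on RT */
static DEFINE_RAW_SPINLOCK(report_lock);

static void report_lockup(int cpu)
{
	raw_spin_lock(&report_lock);	/* serialize dumps across CPUs */
	pr_emerg("hard lockup on cpu %d\n", cpu);
	raw_spin_unlock(&report_lock);
}
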
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index ef071ca73fc3..c7a62d6adb00 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -48,6 +48,8 @@
+ #include <linux/nodemask.h>
+ #include <linux/moduleparam.h>
+ #include <linux/uaccess.h>
++#include <linux/locallock.h>
++#include <linux/delay.h>
+
+ #include "workqueue_internal.h"
+
+@@ -121,11 +123,16 @@ enum {
+ * cpu or grabbing pool->lock is enough for read access. If
+ * POOL_DISASSOCIATED is set, it's identical to L.
+ *
++ * On RT we need the extra protection via rt_lock_idle_list() for
++ * the list manipulations against read access from
++ * wq_worker_sleeping(). All other places are nicely serialized via
++ * pool->lock.
++ *
+ * A: pool->attach_mutex protected.
+ *
+ * PL: wq_pool_mutex protected.
+ *
+- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
++ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
+ *
+ * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
+ *
+@@ -134,7 +141,7 @@ enum {
+ *
+ * WQ: wq->mutex protected.
+ *
+- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
++ * WR: wq->mutex protected for writes. RCU protected for reads.
+ *
+ * MD: wq_mayday_lock protected.
+ */
+@@ -185,7 +192,7 @@ struct worker_pool {
+ atomic_t nr_running ____cacheline_aligned_in_smp;
+
+ /*
+- * Destruction of pool is sched-RCU protected to allow dereferences
++ * Destruction of pool is RCU protected to allow dereferences
+ * from get_work_pool().
+ */
+ struct rcu_head rcu;
+@@ -214,7 +221,7 @@ struct pool_workqueue {
+ /*
+ * Release of unbound pwq is punted to system_wq. See put_pwq()
+ * and pwq_unbound_release_workfn() for details. pool_workqueue
+- * itself is also sched-RCU protected so that the first pwq can be
++ * itself is also RCU protected so that the first pwq can be
+ * determined without grabbing wq->mutex.
+ */
+ struct work_struct unbound_release_work;
+@@ -348,6 +355,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
+ struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
+ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
+
++static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
++
+ static int worker_thread(void *__worker);
+ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+
+@@ -355,20 +364,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+ #include <trace/events/workqueue.h>
+
+ #define assert_rcu_or_pool_mutex() \
+- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !lockdep_is_held(&wq_pool_mutex), \
+- "sched RCU or wq_pool_mutex should be held")
++ "RCU or wq_pool_mutex should be held")
+
+ #define assert_rcu_or_wq_mutex(wq) \
+- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !lockdep_is_held(&wq->mutex), \
+- "sched RCU or wq->mutex should be held")
++ "RCU or wq->mutex should be held")
+
+ #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
+- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !lockdep_is_held(&wq->mutex) && \
+ !lockdep_is_held(&wq_pool_mutex), \
+- "sched RCU, wq->mutex or wq_pool_mutex should be held")
++ "RCU, wq->mutex or wq_pool_mutex should be held")
+
+ #define for_each_cpu_worker_pool(pool, cpu) \
+ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
+@@ -380,7 +389,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+ * @pool: iteration cursor
+ * @pi: integer used for iteration
+ *
+- * This must be called either with wq_pool_mutex held or sched RCU read
++ * This must be called either with wq_pool_mutex held or RCU read
+ * locked. If the pool needs to be used beyond the locking in effect, the
+ * caller is responsible for guaranteeing that the pool stays online.
+ *
+@@ -412,7 +421,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+ * @pwq: iteration cursor
+ * @wq: the target workqueue
+ *
+- * This must be called either with wq->mutex held or sched RCU read locked.
++ * This must be called either with wq->mutex held or RCU read locked.
+ * If the pwq needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pwq stays online.
+ *
+@@ -424,6 +433,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+ if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
+ else
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++static inline void rt_lock_idle_list(struct worker_pool *pool)
++{
++ preempt_disable();
++}
++static inline void rt_unlock_idle_list(struct worker_pool *pool)
++{
++ preempt_enable();
++}
++static inline void sched_lock_idle_list(struct worker_pool *pool) { }
++static inline void sched_unlock_idle_list(struct worker_pool *pool) { }
++#else
++static inline void rt_lock_idle_list(struct worker_pool *pool) { }
++static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
++static inline void sched_lock_idle_list(struct worker_pool *pool)
++{
++ spin_lock_irq(&pool->lock);
++}
++static inline void sched_unlock_idle_list(struct worker_pool *pool)
++{
++ spin_unlock_irq(&pool->lock);
++}
++#endif
++
++
+ #ifdef CONFIG_DEBUG_OBJECTS_WORK
+
+ static struct debug_obj_descr work_debug_descr;
+@@ -548,7 +582,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
+ * @wq: the target workqueue
+ * @node: the node ID
+ *
+- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
++ * This must be called with any of wq_pool_mutex, wq->mutex or RCU
+ * read locked.
+ * If the pwq needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pwq stays online.
+@@ -692,8 +726,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
+ * @work: the work item of interest
+ *
+ * Pools are created and destroyed under wq_pool_mutex, and allow read
+- * access under sched-RCU read lock. As such, this function should be
+- * called under wq_pool_mutex or with preemption disabled.
++ * access under RCU read lock. As such, this function should be
++ * called under wq_pool_mutex or inside an rcu_read_lock() region.
+ *
+ * All fields of the returned pool are accessible as long as the above
+ * mentioned locking is in effect. If the returned pool needs to be used
+@@ -830,50 +864,45 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
+ */
+ static void wake_up_worker(struct worker_pool *pool)
+ {
+- struct worker *worker = first_idle_worker(pool);
++ struct worker *worker;
++
++ rt_lock_idle_list(pool);
++
++ worker = first_idle_worker(pool);
+
+ if (likely(worker))
+ wake_up_process(worker->task);
++
++ rt_unlock_idle_list(pool);
+ }
+
+ /**
+- * wq_worker_waking_up - a worker is waking up
++ * wq_worker_running - a worker is running again
+ * @task: task waking up
+- * @cpu: CPU @task is waking up to
+ *
+- * This function is called during try_to_wake_up() when a worker is
+- * being awoken.
+- *
+- * CONTEXT:
+- * spin_lock_irq(rq->lock)
++ * This function is called when a worker returns from schedule()
+ */
+-void wq_worker_waking_up(struct task_struct *task, int cpu)
++void wq_worker_running(struct task_struct *task)
+ {
+ struct worker *worker = kthread_data(task);
+
+- if (!(worker->flags & WORKER_NOT_RUNNING)) {
+- WARN_ON_ONCE(worker->pool->cpu != cpu);
++ if (!worker->sleeping)
++ return;
++ if (!(worker->flags & WORKER_NOT_RUNNING))
+ atomic_inc(&worker->pool->nr_running);
+- }
++ worker->sleeping = 0;
+ }
+
+ /**
+ * wq_worker_sleeping - a worker is going to sleep
+ * @task: task going to sleep
+ *
+- * This function is called during schedule() when a busy worker is
+- * going to sleep. Worker on the same cpu can be woken up by
+- * returning pointer to its task.
+- *
+- * CONTEXT:
+- * spin_lock_irq(rq->lock)
+- *
+- * Return:
+- * Worker task on @cpu to wake up, %NULL if none.
++ * This function is called from schedule() when a busy worker is
++ * going to sleep.
+ */
+-struct task_struct *wq_worker_sleeping(struct task_struct *task)
++void wq_worker_sleeping(struct task_struct *task)
+ {
+- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
++ struct worker *worker = kthread_data(task);
+ struct worker_pool *pool;
+
+ /*
+@@ -882,29 +911,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
+ * checking NOT_RUNNING.
+ */
+ if (worker->flags & WORKER_NOT_RUNNING)
+- return NULL;
++ return;
+
+ pool = worker->pool;
+
+- /* this can only happen on the local cpu */
+- if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
+- return NULL;
++ if (WARN_ON_ONCE(worker->sleeping))
++ return;
++
++ worker->sleeping = 1;
+
+ /*
+ * The counterpart of the following dec_and_test, implied mb,
+ * worklist not empty test sequence is in insert_work().
+ * Please read comment there.
+- *
+- * NOT_RUNNING is clear. This means that we're bound to and
+- * running on the local cpu w/ rq lock held and preemption
+- * disabled, which in turn means that none else could be
+- * manipulating idle_list, so dereferencing idle_list without pool
+- * lock is safe.
+ */
+ if (atomic_dec_and_test(&pool->nr_running) &&
+- !list_empty(&pool->worklist))
+- to_wakeup = first_idle_worker(pool);
+- return to_wakeup ? to_wakeup->task : NULL;
++ !list_empty(&pool->worklist)) {
++ sched_lock_idle_list(pool);
++ wake_up_worker(pool);
++ sched_unlock_idle_list(pool);
++ }
+ }
+
+ /**
+@@ -1098,12 +1124,12 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
+ {
+ if (pwq) {
+ /*
+- * As both pwqs and pools are sched-RCU protected, the
++ * As both pwqs and pools are RCU protected, the
+ * following lock operations are safe.
+ */
+- spin_lock_irq(&pwq->pool->lock);
++ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
+ put_pwq(pwq);
+- spin_unlock_irq(&pwq->pool->lock);
++ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
+ }
+ }
+
+@@ -1207,7 +1233,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ struct worker_pool *pool;
+ struct pool_workqueue *pwq;
+
+- local_irq_save(*flags);
++ local_lock_irqsave(pendingb_lock, *flags);
+
+ /* try to steal the timer if it exists */
+ if (is_dwork) {
+@@ -1226,6 +1252,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
+ return 0;
+
++ rcu_read_lock();
+ /*
+ * The queueing is in progress, or it is already queued. Try to
+ * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+@@ -1264,14 +1291,16 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+ set_work_pool_and_keep_pending(work, pool->id);
+
+ spin_unlock(&pool->lock);
++ rcu_read_unlock();
+ return 1;
+ }
+ spin_unlock(&pool->lock);
+ fail:
+- local_irq_restore(*flags);
++ rcu_read_unlock();
++ local_unlock_irqrestore(pendingb_lock, *flags);
+ if (work_is_canceling(work))
+ return -ENOENT;
+- cpu_relax();
++ cpu_chill();
+ return -EAGAIN;
+ }
+
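The cpu_relax() -> cpu_chill() swap is the fix for a classic RT livelock: busy-waiting for PENDING to clear can spin forever against a preempted owner of equal or higher priority, so RT sleeps briefly instead. A simplified sketch of the helper, assuming the RT tree's definition (the real one also handles freezer interaction):

#ifdef CONFIG_PREEMPT_RT_FULL
#include <linux/hrtimer.h>

/* sleep ~1ms in the hope that whoever holds what we want lets it go */
void cpu_chill(void)
{
	struct timespec tu = {
		.tv_nsec = NSEC_PER_MSEC,
	};

	hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
#else
# define cpu_chill()	cpu_relax()
#endif
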
+@@ -1373,7 +1402,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ * queued or lose PENDING. Grabbing PENDING and queueing should
+ * happen with IRQ disabled.
+ */
+- WARN_ON_ONCE(!irqs_disabled());
++ WARN_ON_ONCE_NONRT(!irqs_disabled());
+
+ debug_work_activate(work);
+
+@@ -1381,6 +1410,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ if (unlikely(wq->flags & __WQ_DRAINING) &&
+ WARN_ON_ONCE(!is_chained_work(wq)))
+ return;
++ rcu_read_lock();
+ retry:
+ if (req_cpu == WORK_CPU_UNBOUND)
+ cpu = wq_select_unbound_cpu(raw_smp_processor_id());
+@@ -1437,10 +1467,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ /* pwq determined, queue */
+ trace_workqueue_queue_work(req_cpu, pwq, work);
+
+- if (WARN_ON(!list_empty(&work->entry))) {
+- spin_unlock(&pwq->pool->lock);
+- return;
+- }
++ if (WARN_ON(!list_empty(&work->entry)))
++ goto out;
+
+ pwq->nr_in_flight[pwq->work_color]++;
+ work_flags = work_color_to_flags(pwq->work_color);
+@@ -1458,7 +1486,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+
+ insert_work(pwq, work, worklist, work_flags);
+
++out:
+ spin_unlock(&pwq->pool->lock);
++ rcu_read_unlock();
+ }
+
+ /**
+@@ -1478,14 +1508,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
+ bool ret = false;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pendingb_lock, flags);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_work(cpu, wq, work);
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(queue_work_on);
+@@ -1552,14 +1582,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ unsigned long flags;
+
+ /* read the comment in __queue_work() */
+- local_irq_save(flags);
++ local_lock_irqsave(pendingb_lock, flags);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_delayed_work(cpu, wq, dwork, delay);
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(queue_delayed_work_on);
+@@ -1594,7 +1624,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+
+ if (likely(ret >= 0)) {
+ __queue_delayed_work(cpu, wq, dwork, delay);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ }
+
+ /* -ENOENT from try_to_grab_pending() becomes %true */
+@@ -1627,7 +1657,9 @@ static void worker_enter_idle(struct worker *worker)
+ worker->last_active = jiffies;
+
+ /* idle_list is LIFO */
++ rt_lock_idle_list(pool);
+ list_add(&worker->entry, &pool->idle_list);
++ rt_unlock_idle_list(pool);
+
+ if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
+ mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
+@@ -1660,7 +1692,9 @@ static void worker_leave_idle(struct worker *worker)
+ return;
+ worker_clr_flags(worker, WORKER_IDLE);
+ pool->nr_idle--;
++ rt_lock_idle_list(pool);
+ list_del_init(&worker->entry);
++ rt_unlock_idle_list(pool);
+ }
+
+ static struct worker *alloc_worker(int node)
+@@ -1826,7 +1860,9 @@ static void destroy_worker(struct worker *worker)
+ pool->nr_workers--;
+ pool->nr_idle--;
+
++ rt_lock_idle_list(pool);
+ list_del_init(&worker->entry);
++ rt_unlock_idle_list(pool);
+ worker->flags |= WORKER_DIE;
+ wake_up_process(worker->task);
+ }
+@@ -2785,14 +2821,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
+
+ might_sleep();
+
+- local_irq_disable();
++ rcu_read_lock();
+ pool = get_work_pool(work);
+ if (!pool) {
+- local_irq_enable();
++ rcu_read_unlock();
+ return false;
+ }
+
+- spin_lock(&pool->lock);
++ spin_lock_irq(&pool->lock);
+ /* see the comment in try_to_grab_pending() with the same code */
+ pwq = get_work_pwq(work);
+ if (pwq) {
+@@ -2821,10 +2857,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
+ else
+ lock_map_acquire_read(&pwq->wq->lockdep_map);
+ lock_map_release(&pwq->wq->lockdep_map);
+-
++ rcu_read_unlock();
+ return true;
+ already_gone:
+ spin_unlock_irq(&pool->lock);
++ rcu_read_unlock();
+ return false;
+ }
+
+@@ -2911,7 +2948,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+
+ /* tell other tasks trying to grab @work to back off */
+ mark_work_canceling(work);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+
+ flush_work(work);
+ clear_work_data(work);
+@@ -2966,10 +3003,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+ */
+ bool flush_delayed_work(struct delayed_work *dwork)
+ {
+- local_irq_disable();
++ local_lock_irq(pendingb_lock);
+ if (del_timer_sync(&dwork->timer))
+ __queue_work(dwork->cpu, dwork->wq, &dwork->work);
+- local_irq_enable();
++ local_unlock_irq(pendingb_lock);
+ return flush_work(&dwork->work);
+ }
+ EXPORT_SYMBOL(flush_delayed_work);
+@@ -3004,7 +3041,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
+
+ set_work_pool_and_clear_pending(&dwork->work,
+ get_work_pool_id(&dwork->work));
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(cancel_delayed_work);
+@@ -3233,7 +3270,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
+ * put_unbound_pool - put a worker_pool
+ * @pool: worker_pool to put
+ *
+- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
++ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
+ * safe manner. get_unbound_pool() calls this function on its failure path
+ * and this function should be able to release pools which went through,
+ * successfully or not, init_worker_pool().
+@@ -3287,8 +3324,8 @@ static void put_unbound_pool(struct worker_pool *pool)
+ del_timer_sync(&pool->idle_timer);
+ del_timer_sync(&pool->mayday_timer);
+
+- /* sched-RCU protected to allow dereferences from get_work_pool() */
+- call_rcu_sched(&pool->rcu, rcu_free_pool);
++ /* RCU protected to allow dereferences from get_work_pool() */
++ call_rcu(&pool->rcu, rcu_free_pool);
+ }
+
+ /**
+@@ -3395,14 +3432,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
+ put_unbound_pool(pool);
+ mutex_unlock(&wq_pool_mutex);
+
+- call_rcu_sched(&pwq->rcu, rcu_free_pwq);
++ call_rcu(&pwq->rcu, rcu_free_pwq);
+
+ /*
+ * If we're the last pwq going away, @wq is already dead and no one
+ * is gonna access it anymore. Schedule RCU free.
+ */
+ if (is_last)
+- call_rcu_sched(&wq->rcu, rcu_free_wq);
++ call_rcu(&wq->rcu, rcu_free_wq);
+ }
+
+ /**
+@@ -4052,7 +4089,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
+ * The base ref is never dropped on per-cpu pwqs. Directly
+ * schedule RCU free.
+ */
+- call_rcu_sched(&wq->rcu, rcu_free_wq);
++ call_rcu(&wq->rcu, rcu_free_wq);
+ } else {
+ /*
+ * We're the sole accessor of @wq at this point. Directly
+@@ -4145,7 +4182,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
+ struct pool_workqueue *pwq;
+ bool ret;
+
+- rcu_read_lock_sched();
++ rcu_read_lock();
++ preempt_disable();
+
+ if (cpu == WORK_CPU_UNBOUND)
+ cpu = smp_processor_id();
+@@ -4156,7 +4194,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
+ pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+
+ ret = !list_empty(&pwq->delayed_works);
+- rcu_read_unlock_sched();
++ preempt_enable();
++ rcu_read_unlock();
+
+ return ret;
+ }
+@@ -4182,15 +4221,15 @@ unsigned int work_busy(struct work_struct *work)
+ if (work_pending(work))
+ ret |= WORK_BUSY_PENDING;
+
+- local_irq_save(flags);
++ rcu_read_lock();
+ pool = get_work_pool(work);
+ if (pool) {
+- spin_lock(&pool->lock);
++ spin_lock_irqsave(&pool->lock, flags);
+ if (find_worker_executing_work(pool, work))
+ ret |= WORK_BUSY_RUNNING;
+- spin_unlock(&pool->lock);
++ spin_unlock_irqrestore(&pool->lock, flags);
+ }
+- local_irq_restore(flags);
++ rcu_read_unlock();
+
+ return ret;
+ }
+@@ -4379,7 +4418,7 @@ void show_workqueue_state(void)
+ unsigned long flags;
+ int pi;
+
+- rcu_read_lock_sched();
++ rcu_read_lock();
+
+ pr_info("Showing busy workqueues and worker pools:\n");
+
+@@ -4432,7 +4471,7 @@ void show_workqueue_state(void)
+ spin_unlock_irqrestore(&pool->lock, flags);
+ }
+
+- rcu_read_unlock_sched();
++ rcu_read_unlock();
+ }
+
+ /*
+@@ -4770,16 +4809,16 @@ bool freeze_workqueues_busy(void)
+ * nr_active is monotonically decreasing. It's safe
+ * to peek without lock.
+ */
+- rcu_read_lock_sched();
++ rcu_read_lock();
+ for_each_pwq(pwq, wq) {
+ WARN_ON_ONCE(pwq->nr_active < 0);
+ if (pwq->nr_active) {
+ busy = true;
+- rcu_read_unlock_sched();
++ rcu_read_unlock();
+ goto out_unlock;
+ }
+ }
+- rcu_read_unlock_sched();
++ rcu_read_unlock();
+ }
+ out_unlock:
+ mutex_unlock(&wq_pool_mutex);
+@@ -4969,7 +5008,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
+ const char *delim = "";
+ int node, written = 0;
+
+- rcu_read_lock_sched();
++ get_online_cpus();
++ rcu_read_lock();
+ for_each_node(node) {
+ written += scnprintf(buf + written, PAGE_SIZE - written,
+ "%s%d:%d", delim, node,
+@@ -4977,7 +5017,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
+ delim = " ";
+ }
+ written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
+- rcu_read_unlock_sched();
++ rcu_read_unlock();
++ put_online_cpus();
+
+ return written;
+ }
+diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
+index 8635417c587b..f000c4d6917e 100644
+--- a/kernel/workqueue_internal.h
++++ b/kernel/workqueue_internal.h
+@@ -43,6 +43,7 @@ struct worker {
+ unsigned long last_active; /* L: last active timestamp */
+ unsigned int flags; /* X: flags */
+ int id; /* I: worker id */
++ int sleeping; /* None */
+
+ /*
+ * Opaque string set with work_set_desc(). Printed out with task
+@@ -68,7 +69,7 @@ static inline struct worker *current_wq_worker(void)
+ * Scheduler hooks for concurrency managed workqueue. Only to be used from
+ * sched/core.c and workqueue.c.
+ */
+-void wq_worker_waking_up(struct task_struct *task, int cpu);
+-struct task_struct *wq_worker_sleeping(struct task_struct *task);
++void wq_worker_running(struct task_struct *task);
++void wq_worker_sleeping(struct task_struct *task);
+
+ #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
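
The hook pair changes meaning: the old wq_worker_waking_up()/wq_worker_sleeping() ran under the runqueue lock and returned a task for the scheduler to wake, while the new wq_worker_running()/wq_worker_sleeping() are plain notifications, free to take pool locks themselves. Roughly how the scheduler side is expected to call them; the actual call sites are in kernel/sched/core.c, outside this hunk:

/* sketch; PF_WQ_WORKER marks kworker tasks */
static inline void sched_submit_work(struct task_struct *tsk)
{
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_sleeping(tsk);	/* before blocking in schedule() */
}

static inline void sched_update_worker(struct task_struct *tsk)
{
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_running(tsk);		/* after returning from schedule() */
}
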
+diff --git a/lib/Kconfig b/lib/Kconfig
+index d79909dc01ec..fd2accb2f2bb 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -400,6 +400,7 @@ config CHECK_SIGNATURE
+
+ config CPUMASK_OFFSTACK
+ bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
++ depends on !PREEMPT_RT_FULL
+ help
+ Use dynamic allocation for cpumask_var_t, instead of putting
+ them on the stack. This is a bit more expensive, but avoids
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index cab7405f48d2..dbc49c48ff53 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -977,6 +977,7 @@ config TIMER_STATS
+ config DEBUG_PREEMPT
+ bool "Debug preemptible kernel"
+ depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
++ select USING_GET_LOCK_PARENT_IP
+ default y
+ help
+ If you say Y here then the kernel will use a debug variant of the
+@@ -1159,8 +1160,17 @@ config LOCK_TORTURE_TEST
+
+ endmenu # lock debugging
+
++config USING_GET_LOCK_PARENT_IP
++ bool
++ help
++ Enables the use of the function get_lock_parent_ip() that
++ will use __builtin_return_address(n) with n > 0 causing
++ some gcc warnings. When this is selected, those warnings
++ will be suppressed.
++
+ config TRACE_IRQFLAGS
+ bool
++ select USING_GET_LOCK_PARENT_IP
+ help
+ Enables hooks to interrupt enabling and disabling for
+ either tracing or lock debugging.
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index a8e12601eb37..c76d5f0beafe 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -308,7 +308,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
+ struct debug_obj *obj;
+ unsigned long flags;
+
+- fill_pool();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (preempt_count() == 0 && !irqs_disabled())
++#endif
++ fill_pool();
+
+ db = get_bucket((unsigned long) addr);
+
+diff --git a/lib/idr.c b/lib/idr.c
+index 6098336df267..9decbe914595 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -30,6 +30,7 @@
+ #include <linux/idr.h>
+ #include <linux/spinlock.h>
+ #include <linux/percpu.h>
++#include <linux/locallock.h>
+
+ #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
+ #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
+@@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
+ static DEFINE_PER_CPU(int, idr_preload_cnt);
+ static DEFINE_SPINLOCK(simple_ida_lock);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
++
++static inline void idr_preload_lock(void)
++{
++ local_lock(idr_lock);
++}
++
++static inline void idr_preload_unlock(void)
++{
++ local_unlock(idr_lock);
++}
++
++void idr_preload_end(void)
++{
++ idr_preload_unlock();
++}
++EXPORT_SYMBOL(idr_preload_end);
++#else
++static inline void idr_preload_lock(void)
++{
++ preempt_disable();
++}
++
++static inline void idr_preload_unlock(void)
++{
++ preempt_enable();
++}
++#endif
++
++
+ /* the maximum ID which can be allocated given idr->layers */
+ static int idr_max(int layers)
+ {
+@@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
+ * context. See idr_preload() for details.
+ */
+ if (!in_interrupt()) {
+- preempt_disable();
++ idr_preload_lock();
+ new = __this_cpu_read(idr_preload_head);
+ if (new) {
+ __this_cpu_write(idr_preload_head, new->ary[0]);
+ __this_cpu_dec(idr_preload_cnt);
+ new->ary[0] = NULL;
+ }
+- preempt_enable();
++ idr_preload_unlock();
+ if (new)
+ return new;
+ }
+@@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
+ idr_mark_full(pa, id);
+ }
+
+-
+ /**
+ * idr_preload - preload for idr_alloc()
+ * @gfp_mask: allocation mask to use for preloading
+@@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
+ WARN_ON_ONCE(in_interrupt());
+ might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+
+- preempt_disable();
++ idr_preload_lock();
+
+ /*
+ * idr_alloc() is likely to succeed w/o full idr_layer buffer and
+@@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
+ while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
+ struct idr_layer *new;
+
+- preempt_enable();
++ idr_preload_unlock();
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+- preempt_disable();
++ idr_preload_lock();
+ if (!new)
+ break;
+
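DEFINE_LOCAL_IRQ_LOCK is this patch's central replacement for bare preempt_disable()/local_irq_save(): on mainline the lock operations degrade to exactly those primitives, while on RT they take a per-CPU sleeping lock, so the section stays preemptible but per-CPU data remains serialized. Generic usage of the idiom, assuming the RT tree's <linux/locallock.h>:

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(stats_lock);
static DEFINE_PER_CPU(unsigned long, stats_counter);

static void stats_inc(void)
{
	/* !RT: preempt_disable(); RT: acquire this CPU's sleeping lock */
	local_lock(stats_lock);
	this_cpu_inc(stats_counter);
	local_unlock(stats_lock);
}
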
+diff --git a/lib/irq_poll.c b/lib/irq_poll.c
+index 836f7db4e548..709d4eed1df9 100644
+--- a/lib/irq_poll.c
++++ b/lib/irq_poll.c
+@@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop)
+ list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(irq_poll_sched);
+
+@@ -71,6 +72,7 @@ void irq_poll_complete(struct irq_poll *iop)
+ local_irq_save(flags);
+ __irq_poll_complete(iop);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(irq_poll_complete);
+
+@@ -95,6 +97,7 @@ static void irq_poll_softirq(struct softirq_action *h)
+ }
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Even though interrupts have been re-enabled, this
+ * access is safe because interrupts can only add new
+@@ -132,6 +135,7 @@ static void irq_poll_softirq(struct softirq_action *h)
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ /**
+@@ -199,6 +203,7 @@ static int irq_poll_cpu_notify(struct notifier_block *self,
+ this_cpu_ptr(&blk_cpu_iopoll));
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ return NOTIFY_OK;
+diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
+index 872a15a2a637..b93a6103fa4d 100644
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -590,6 +590,8 @@ GENERATE_TESTCASE(init_held_rsem)
+ #include "locking-selftest-spin-hardirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #include "locking-selftest-rlock-hardirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
+
+@@ -605,9 +607,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
+
++#endif
++
+ #undef E1
+ #undef E2
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Enabling hardirqs with a softirq-safe lock held:
+ */
+@@ -640,6 +645,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
+ #undef E1
+ #undef E2
+
++#endif
++
+ /*
+ * Enabling irqs with an irq-safe lock held:
+ */
+@@ -663,6 +670,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
+ #include "locking-selftest-spin-hardirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #include "locking-selftest-rlock-hardirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
+
+@@ -678,6 +687,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+
++#endif
++
+ #undef E1
+ #undef E2
+
+@@ -709,6 +720,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+ #include "locking-selftest-spin-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #include "locking-selftest-rlock-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
+
+@@ -724,6 +737,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+
++#endif
++
+ #undef E1
+ #undef E2
+ #undef E3
+@@ -757,6 +772,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+ #include "locking-selftest-spin-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #include "locking-selftest-rlock-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
+
+@@ -772,10 +789,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
+
++#endif
++
+ #undef E1
+ #undef E2
+ #undef E3
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ /*
+ * read-lock / write-lock irq inversion.
+ *
+@@ -838,6 +859,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
+ #undef E2
+ #undef E3
+
++#endif
++
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ /*
+ * read-lock / write-lock recursion that is actually safe.
+ */
+@@ -876,6 +901,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
+ #undef E2
+ #undef E3
+
++#endif
++
+ /*
+ * read-lock / write-lock recursion that is unsafe.
+ */
+@@ -1858,6 +1885,7 @@ void locking_selftest(void)
+
+ printk(" --------------------------------------------------------------------------\n");
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * irq-context testcases:
+ */
+@@ -1870,6 +1898,28 @@ void locking_selftest(void)
+
+ DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
+ // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
++#else
++ /* On -rt, we only do hardirq context test for raw spinlock */
++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
++
++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
++
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
++
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
++#endif
+
+ ww_tests();
+
+diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
+index 6d40944960de..822a2c027e72 100644
+--- a/lib/percpu_ida.c
++++ b/lib/percpu_ida.c
+@@ -26,6 +26,9 @@
+ #include <linux/string.h>
+ #include <linux/spinlock.h>
+ #include <linux/percpu_ida.h>
++#include <linux/locallock.h>
++
++static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock);
+
+ struct percpu_ida_cpu {
+ /*
+@@ -148,13 +151,13 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
+ unsigned long flags;
+ int tag;
+
+- local_irq_save(flags);
++ local_lock_irqsave(irq_off_lock, flags);
+ tags = this_cpu_ptr(pool->tag_cpu);
+
+ /* Fastpath */
+ tag = alloc_local_tag(tags);
+ if (likely(tag >= 0)) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(irq_off_lock, flags);
+ return tag;
+ }
+
+@@ -173,6 +176,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
+
+ if (!tags->nr_free)
+ alloc_global_tags(pool, tags);
++
+ if (!tags->nr_free)
+ steal_tags(pool, tags);
+
+@@ -184,7 +188,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
+ }
+
+ spin_unlock(&pool->lock);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(irq_off_lock, flags);
+
+ if (tag >= 0 || state == TASK_RUNNING)
+ break;
+@@ -196,7 +200,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
+
+ schedule();
+
+- local_irq_save(flags);
++ local_lock_irqsave(irq_off_lock, flags);
+ tags = this_cpu_ptr(pool->tag_cpu);
+ }
+ if (state != TASK_RUNNING)
+@@ -221,7 +225,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
+
+ BUG_ON(tag >= pool->nr_tags);
+
+- local_irq_save(flags);
++ local_lock_irqsave(irq_off_lock, flags);
+ tags = this_cpu_ptr(pool->tag_cpu);
+
+ spin_lock(&tags->lock);
+@@ -253,7 +257,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
+ spin_unlock(&pool->lock);
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(irq_off_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(percpu_ida_free);
+
+@@ -345,7 +349,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
+ struct percpu_ida_cpu *remote;
+ unsigned cpu, i, err = 0;
+
+- local_irq_save(flags);
++ local_lock_irqsave(irq_off_lock, flags);
+ for_each_possible_cpu(cpu) {
+ remote = per_cpu_ptr(pool->tag_cpu, cpu);
+ spin_lock(&remote->lock);
+@@ -367,7 +371,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
+ }
+ spin_unlock(&pool->lock);
+ out:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(irq_off_lock, flags);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 8e6d552c40dd..881cc195d85f 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -290,13 +290,14 @@ radix_tree_node_alloc(struct radix_tree_root *root)
+ * succeed in getting a node here (and never reach
+ * kmem_cache_alloc)
+ */
+- rtp = this_cpu_ptr(&radix_tree_preloads);
++ rtp = &get_cpu_var(radix_tree_preloads);
+ if (rtp->nr) {
+ ret = rtp->nodes;
+ rtp->nodes = ret->private_data;
+ ret->private_data = NULL;
+ rtp->nr--;
+ }
++ put_cpu_var(radix_tree_preloads);
+ /*
+ * Update the allocation stack trace as this is more useful
+ * for debugging.
+@@ -336,6 +337,7 @@ radix_tree_node_free(struct radix_tree_node *node)
+ call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Load up this CPU's radix_tree_node buffer with sufficient objects to
+ * ensure that the addition of a single element in the tree cannot fail. On
+@@ -455,6 +457,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
+
+ return __radix_tree_preload(gfp_mask, nr_nodes);
+ }
++#endif
+
+ /*
+ * The maximum index which can be stored in a radix tree
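The switch from this_cpu_ptr() to get_cpu_var() above is deliberate: this_cpu_ptr() alone does not disable preemption, so on RT the task could migrate mid-access and corrupt another CPU's preload buffer. get_cpu_var()/put_cpu_var() bracket the access with preemption disabled; the shape of the idiom, on a made-up per-CPU cache:

#include <linux/percpu.h>

/* hypothetical per-CPU object cache, mirroring radix_tree_preloads */
struct preload_cache { int nr; void *nodes; };
static DEFINE_PER_CPU(struct preload_cache, my_preloads);

static void *take_preloaded(void)
{
	struct preload_cache *pc;
	void *obj = NULL;

	pc = &get_cpu_var(my_preloads);	/* disables preemption */
	if (pc->nr) {
		obj = pc->nodes;
		pc->nr--;
	}
	put_cpu_var(my_preloads);	/* re-enables preemption */
	return obj;
}
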
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index 004fc70fc56a..ccc46992a517 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -620,7 +620,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
+ flush_kernel_dcache_page(miter->page);
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+- WARN_ON_ONCE(preemptible());
++ WARN_ON_ONCE(!pagefault_disabled());
+ kunmap_atomic(miter->addr);
+ } else
+ kunmap(miter->page);
+@@ -664,7 +664,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
+ if (!sg_miter_skip(&miter, skip))
+ return false;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ while (sg_miter_next(&miter) && offset < buflen) {
+ unsigned int len;
+@@ -681,7 +681,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
+
+ sg_miter_stop(&miter);
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return offset;
+ }
+ EXPORT_SYMBOL(sg_copy_buffer);
+diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
+index 1afec32de6f2..11fa431046a8 100644
+--- a/lib/smp_processor_id.c
++++ b/lib/smp_processor_id.c
+@@ -39,8 +39,9 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
+ if (!printk_ratelimit())
+ goto out_enable;
+
+- printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
+- what1, what2, preempt_count() - 1, current->comm, current->pid);
++ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n",
++ what1, what2, preempt_count() - 1, __migrate_disabled(current),
++ current->comm, current->pid);
+
+ print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ dump_stack();
+diff --git a/mm/Kconfig b/mm/Kconfig
+index be0ee11fa0d9..fe2857d67973 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -410,7 +410,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+
+ config TRANSPARENT_HUGEPAGE
+ bool "Transparent Hugepage Support"
+- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
+ select COMPACTION
+ select RADIX_TREE_MULTIORDER
+ help
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 8fde443f36d7..d7a863b0ec20 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -457,9 +457,9 @@ void wb_congested_put(struct bdi_writeback_congested *congested)
+ {
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return;
+ }
+
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 9affb2908304..d5eb0e52e96f 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1585,10 +1585,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
+ block_start_pfn(cc->migrate_pfn, cc->order);
+
+ if (cc->last_migrated_pfn < current_block_start) {
+- cpu = get_cpu();
++ cpu = get_cpu_light();
++ local_lock_irq(swapvec_lock);
+ lru_add_drain_cpu(cpu);
++ local_unlock_irq(swapvec_lock);
+ drain_local_pages(zone);
+- put_cpu();
++ put_cpu_light();
+ /* No more flushing until we migrate again */
+ cc->last_migrated_pfn = 0;
+ }
+diff --git a/mm/filemap.c b/mm/filemap.c
+index ced9ef6c06b0..19f6f0d77604 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct address_space *mapping,
+ * node->private_list is protected by
+ * mapping->tree_lock.
+ */
+- if (!list_empty(&node->private_list))
+- list_lru_del(&workingset_shadow_nodes,
++ if (!list_empty(&node->private_list)) {
++ local_lock(workingset_shadow_lock);
++ list_lru_del(&__workingset_shadow_nodes,
+ &node->private_list);
++ local_unlock(workingset_shadow_lock);
++ }
+ }
+ return 0;
+ }
+@@ -217,8 +220,10 @@ static void page_cache_tree_delete(struct address_space *mapping,
+ if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
+ list_empty(&node->private_list)) {
+ node->private_data = mapping;
+- list_lru_add(&workingset_shadow_nodes,
+- &node->private_list);
++ local_lock(workingset_shadow_lock);
++ list_lru_add(&__workingset_shadow_nodes,
++ &node->private_list);
++ local_unlock(workingset_shadow_lock);
+ }
+ }
+
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 50b4ca6787f0..77518a3b35a1 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -29,10 +29,11 @@
+ #include <linux/kgdb.h>
+ #include <asm/tlbflush.h>
+
+-
++#ifndef CONFIG_PREEMPT_RT_FULL
+ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+ DEFINE_PER_CPU(int, __kmap_atomic_idx);
+ #endif
++#endif
+
+ /*
+ * Virtual_count is not a pure "count".
+@@ -107,8 +108,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
+ unsigned long totalhigh_pages __read_mostly;
+ EXPORT_SYMBOL(totalhigh_pages);
+
+-
++#ifndef CONFIG_PREEMPT_RT_FULL
+ EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
++#endif
+
+ unsigned int nr_free_highpages (void)
+ {
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 4be518d4e68a..724240ca2f35 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -67,6 +67,7 @@
+ #include <net/sock.h>
+ #include <net/ip.h>
+ #include "slab.h"
++#include <linux/locallock.h>
+
+ #include <asm/uaccess.h>
+
+@@ -92,6 +93,8 @@ int do_swap_account __read_mostly;
+ #define do_swap_account 0
+ #endif
+
++static DEFINE_LOCAL_IRQ_LOCK(event_lock);
++
+ /* Whether legacy memory+swap accounting is active */
+ static bool do_memsw_account(void)
+ {
+@@ -1724,6 +1727,7 @@ struct memcg_stock_pcp {
+ #define FLUSHING_CACHED_CHARGE 0
+ };
+ static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
++static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);
+ static DEFINE_MUTEX(percpu_charge_mutex);
+
+ /**
+@@ -1746,7 +1750,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ if (nr_pages > CHARGE_BATCH)
+ return ret;
+
+- local_irq_save(flags);
++ local_lock_irqsave(memcg_stock_ll, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
+@@ -1754,7 +1758,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(memcg_stock_ll, flags);
+
+ return ret;
+ }
+@@ -1781,13 +1785,13 @@ static void drain_local_stock(struct work_struct *dummy)
+ struct memcg_stock_pcp *stock;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(memcg_stock_ll, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ drain_stock(stock);
+ clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(memcg_stock_ll, flags);
+ }
+
+ /*
+@@ -1799,7 +1803,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ struct memcg_stock_pcp *stock;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(memcg_stock_ll, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (stock->cached != memcg) { /* reset if necessary */
+@@ -1808,7 +1812,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ }
+ stock->nr_pages += nr_pages;
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(memcg_stock_ll, flags);
+ }
+
+ /*
+@@ -1824,7 +1828,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
+ return;
+ /* Notify other cpus that system-wide "drain" is running */
+ get_online_cpus();
+- curcpu = get_cpu();
++ curcpu = get_cpu_light();
+ for_each_online_cpu(cpu) {
+ struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+ struct mem_cgroup *memcg;
+@@ -1841,7 +1845,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
+ schedule_work_on(cpu, &stock->work);
+ }
+ }
+- put_cpu();
++ put_cpu_light();
+ put_online_cpus();
+ mutex_unlock(&percpu_charge_mutex);
+ }
+@@ -4566,12 +4570,12 @@ static int mem_cgroup_move_account(struct page *page,
+
+ ret = 0;
+
+- local_irq_disable();
++ local_lock_irq(event_lock);
+ mem_cgroup_charge_statistics(to, page, compound, nr_pages);
+ memcg_check_events(to, page);
+ mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
+ memcg_check_events(from, page);
+- local_irq_enable();
++ local_unlock_irq(event_lock);
+ out_unlock:
+ unlock_page(page);
+ out:
+@@ -5444,10 +5448,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
+
+ commit_charge(page, memcg, lrucare);
+
+- local_irq_disable();
++ local_lock_irq(event_lock);
+ mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
+ memcg_check_events(memcg, page);
+- local_irq_enable();
++ local_unlock_irq(event_lock);
+
+ if (do_memsw_account() && PageSwapCache(page)) {
+ swp_entry_t entry = { .val = page_private(page) };
+@@ -5503,14 +5507,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
+ memcg_oom_recover(memcg);
+ }
+
+- local_irq_save(flags);
++ local_lock_irqsave(event_lock, flags);
+ __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
+ __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
+ __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
+ __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
+ __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
+ memcg_check_events(memcg, dummy_page);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(event_lock, flags);
+
+ if (!mem_cgroup_is_root(memcg))
+ css_put_many(&memcg->css, nr_pages);
+@@ -5665,10 +5669,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+
+ commit_charge(newpage, memcg, false);
+
+- local_irq_save(flags);
++ local_lock_irqsave(event_lock, flags);
+ mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
+ memcg_check_events(memcg, newpage);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(event_lock, flags);
+ }
+
+ DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
+@@ -5845,6 +5849,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+ {
+ struct mem_cgroup *memcg, *swap_memcg;
+ unsigned short oldid;
++ unsigned long flags;
+
+ VM_BUG_ON_PAGE(PageLRU(page), page);
+ VM_BUG_ON_PAGE(page_count(page), page);
+@@ -5885,12 +5890,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+ * important here to have the interrupts disabled because it is the
+ * only synchronisation we have for updating the per-CPU variables.
+ */
++ local_lock_irqsave(event_lock, flags);
++#ifndef CONFIG_PREEMPT_RT_BASE
+ VM_BUG_ON(!irqs_disabled());
++#endif
+ mem_cgroup_charge_statistics(memcg, page, false, -1);
+ memcg_check_events(memcg, page);
+
+ if (!mem_cgroup_is_root(memcg))
+ css_put(&memcg->css);
++ local_unlock_irqrestore(event_lock, flags);
+ }
+
+ /*
+diff --git a/mm/mmu_context.c b/mm/mmu_context.c
+index 6f4d27c5bb32..5cd25c745a8f 100644
+--- a/mm/mmu_context.c
++++ b/mm/mmu_context.c
+@@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm)
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
++ preempt_disable_rt();
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ atomic_inc(&mm->mm_count);
+@@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm)
+ }
+ tsk->mm = mm;
+ switch_mm(active_mm, mm, tsk);
++ preempt_enable_rt();
+ task_unlock(tsk);
+ #ifdef finish_arch_post_lock_switch
+ finish_arch_post_lock_switch();
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a2214c64ed3c..4be4d5d66f73 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -61,6 +61,7 @@
+ #include <linux/page_ext.h>
+ #include <linux/hugetlb.h>
+ #include <linux/sched/rt.h>
++#include <linux/locallock.h>
+ #include <linux/page_owner.h>
+ #include <linux/kthread.h>
+ #include <linux/memcontrol.h>
+@@ -276,6 +277,18 @@ EXPORT_SYMBOL(nr_node_ids);
+ EXPORT_SYMBOL(nr_online_nodes);
+ #endif
+
++static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define cpu_lock_irqsave(cpu, flags) \
++ local_lock_irqsave_on(pa_lock, flags, cpu)
++# define cpu_unlock_irqrestore(cpu, flags) \
++ local_unlock_irqrestore_on(pa_lock, flags, cpu)
++#else
++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
++#endif
++
+ int page_group_by_mobility_disabled __read_mostly;
+
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+@@ -1056,7 +1069,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
+ #endif /* CONFIG_DEBUG_VM */
+
+ /*
+- * Frees a number of pages from the PCP lists
++ * Frees a number of pages which have been collected from the pcp lists.
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ *
+@@ -1067,19 +1080,58 @@ static bool bulkfree_pcp_prepare(struct page *page)
+ * pinned" detection logic.
+ */
+ static void free_pcppages_bulk(struct zone *zone, int count,
+- struct per_cpu_pages *pcp)
++ struct list_head *list)
+ {
+- int migratetype = 0;
+- int batch_free = 0;
+ unsigned long nr_scanned;
+ bool isolated_pageblocks;
++ unsigned long flags;
++
++ spin_lock_irqsave(&zone->lock, flags);
+
+- spin_lock(&zone->lock);
+ isolated_pageblocks = has_isolate_pageblock(zone);
+ nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
+ if (nr_scanned)
+ __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
+
++ while (!list_empty(list)) {
++ struct page *page;
++ int mt; /* migratetype of the to-be-freed page */
++
++ page = list_first_entry(list, struct page, lru);
++ /* must delete as __free_one_page list manipulates */
++ list_del(&page->lru);
++
++ mt = get_pcppage_migratetype(page);
++ /* MIGRATE_ISOLATE page should not go to pcplists */
++ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
++ /* Pageblock could have been isolated meanwhile */
++ if (unlikely(isolated_pageblocks))
++ mt = get_pageblock_migratetype(page);
++
++ if (bulkfree_pcp_prepare(page))
++ continue;
++
++ __free_one_page(page, page_to_pfn(page), zone, 0, mt);
++ trace_mm_page_pcpu_drain(page, 0, mt);
++ count--;
++ }
++ WARN_ON(count != 0);
++ spin_unlock_irqrestore(&zone->lock, flags);
++}
++
++/*
++ * Moves a number of pages from the PCP lists to free list which
++ * is freed outside of the locked region.
++ *
++ * Assumes all pages on list are in same zone, and of same order.
++ * count is the number of pages to free.
++ */
++static void isolate_pcp_pages(int count, struct per_cpu_pages *src,
++ struct list_head *dst)
++{
++ int migratetype = 0;
++ int batch_free = 0;
++
+ while (count) {
+ struct page *page;
+ struct list_head *list;
+@@ -1095,7 +1147,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ batch_free++;
+ if (++migratetype == MIGRATE_PCPTYPES)
+ migratetype = 0;
+- list = &pcp->lists[migratetype];
++ list = &src->lists[migratetype];
+ } while (list_empty(list));
+
+ /* This is the only non-empty list. Free them all. */
+@@ -1103,27 +1155,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+ batch_free = count;
+
+ do {
+- int mt; /* migratetype of the to-be-freed page */
+-
+ page = list_last_entry(list, struct page, lru);
+- /* must delete as __free_one_page list manipulates */
+ list_del(&page->lru);
+
+- mt = get_pcppage_migratetype(page);
+- /* MIGRATE_ISOLATE page should not go to pcplists */
+- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+- /* Pageblock could have been isolated meanwhile */
+- if (unlikely(isolated_pageblocks))
+- mt = get_pageblock_migratetype(page);
+-
+- if (bulkfree_pcp_prepare(page))
+- continue;
+-
+- __free_one_page(page, page_to_pfn(page), zone, 0, mt);
+- trace_mm_page_pcpu_drain(page, 0, mt);
++ list_add(&page->lru, dst);
+ } while (--count && --batch_free && !list_empty(list));
+ }
+- spin_unlock(&zone->lock);
+ }
+
+ static void free_one_page(struct zone *zone,
+@@ -1132,7 +1169,9 @@ static void free_one_page(struct zone *zone,
+ int migratetype)
+ {
+ unsigned long nr_scanned;
+- spin_lock(&zone->lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&zone->lock, flags);
+ nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
+ if (nr_scanned)
+ __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
+@@ -1142,7 +1181,7 @@ static void free_one_page(struct zone *zone,
+ migratetype = get_pfnblock_migratetype(page, pfn);
+ }
+ __free_one_page(page, pfn, zone, order, migratetype);
+- spin_unlock(&zone->lock);
++ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+
+ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
+@@ -1228,10 +1267,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+ return;
+
+ migratetype = get_pfnblock_migratetype(page, pfn);
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ __count_vm_events(PGFREE, 1 << order);
+ free_one_page(page_zone(page), page, pfn, order, migratetype);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+@@ -2219,16 +2258,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ {
+ unsigned long flags;
++ LIST_HEAD(dst);
+ int to_drain, batch;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ batch = READ_ONCE(pcp->batch);
+ to_drain = min(pcp->count, batch);
+ if (to_drain > 0) {
+- free_pcppages_bulk(zone, to_drain, pcp);
++ isolate_pcp_pages(to_drain, pcp, &dst);
+ pcp->count -= to_drain;
+ }
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
++ free_pcppages_bulk(zone, to_drain, &dst);
+ }
+ #endif
+
+@@ -2244,16 +2285,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ unsigned long flags;
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
++ LIST_HEAD(dst);
++ int count;
+
+- local_irq_save(flags);
++ cpu_lock_irqsave(cpu, flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
+
+ pcp = &pset->pcp;
+- if (pcp->count) {
+- free_pcppages_bulk(zone, pcp->count, pcp);
++ count = pcp->count;
++ if (count) {
++ isolate_pcp_pages(count, pcp, &dst);
+ pcp->count = 0;
+ }
+- local_irq_restore(flags);
++ cpu_unlock_irqrestore(cpu, flags);
++ if (count)
++ free_pcppages_bulk(zone, count, &dst);
+ }
+
+ /*
+@@ -2339,8 +2385,17 @@ void drain_all_pages(struct zone *zone)
+ else
+ cpumask_clear_cpu(cpu, &cpus_with_pcps);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
+ zone, 1);
++#else
++ for_each_cpu(cpu, &cpus_with_pcps) {
++ if (zone)
++ drain_pages_zone(cpu, zone);
++ else
++ drain_pages(cpu);
++ }
++#endif
+ }
+
+ #ifdef CONFIG_HIBERNATION
+@@ -2400,7 +2455,7 @@ void free_hot_cold_page(struct page *page, bool cold)
+
+ migratetype = get_pfnblock_migratetype(page, pfn);
+ set_pcppage_migratetype(page, migratetype);
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ __count_vm_event(PGFREE);
+
+ /*
+@@ -2426,12 +2481,17 @@ void free_hot_cold_page(struct page *page, bool cold)
+ pcp->count++;
+ if (pcp->count >= pcp->high) {
+ unsigned long batch = READ_ONCE(pcp->batch);
+- free_pcppages_bulk(zone, batch, pcp);
++ LIST_HEAD(dst);
++
++ isolate_pcp_pages(batch, pcp, &dst);
+ pcp->count -= batch;
++ local_unlock_irqrestore(pa_lock, flags);
++ free_pcppages_bulk(zone, batch, &dst);
++ return;
+ }
+
+ out:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ /*
+@@ -2568,7 +2628,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+ struct per_cpu_pages *pcp;
+ struct list_head *list;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ do {
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
+@@ -2595,7 +2655,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+ * allocate greater than order-1 page units with __GFP_NOFAIL.
+ */
+ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
+- spin_lock_irqsave(&zone->lock, flags);
++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+
+ do {
+ page = NULL;
+@@ -2607,22 +2667,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+ if (!page)
+ page = __rmqueue(zone, order, migratetype);
+ } while (page && check_new_pages(page, order));
+- spin_unlock(&zone->lock);
+- if (!page)
++ if (!page) {
++ spin_unlock(&zone->lock);
+ goto failed;
++ }
+ __mod_zone_freepage_state(zone, -(1 << order),
+ get_pcppage_migratetype(page));
++ spin_unlock(&zone->lock);
+ }
+
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+ zone_statistics(preferred_zone, zone, gfp_flags);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+
+ VM_BUG_ON_PAGE(bad_range(zone, page), page);
+ return page;
+
+ failed:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ return NULL;
+ }
+
+@@ -6528,7 +6590,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+ int cpu = (unsigned long)hcpu;
+
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
++ local_lock_irq_on(swapvec_lock, cpu);
+ lru_add_drain_cpu(cpu);
++ local_unlock_irq_on(swapvec_lock, cpu);
+ drain_pages(cpu);
+
+ /*
+@@ -6554,6 +6618,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+ void __init page_alloc_init(void)
+ {
+ hotcpu_notifier(page_alloc_cpu_notify, 0);
++ local_irq_lock_init(pa_lock);
+ }
+
+ /*
+@@ -7370,7 +7435,7 @@ void zone_pcp_reset(struct zone *zone)
+ struct per_cpu_pageset *pset;
+
+ /* avoid races with drain_pages() */
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (zone->pageset != &boot_pageset) {
+ for_each_online_cpu(cpu) {
+ pset = per_cpu_ptr(zone->pageset, cpu);
+@@ -7379,7 +7444,7 @@ void zone_pcp_reset(struct zone *zone)
+ free_percpu(zone->pageset);
+ zone->pageset = &boot_pageset;
+ }
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ #ifdef CONFIG_MEMORY_HOTREMOVE
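
Note on the mm/page_alloc.c hunks above: the RT patch splits per-CPU page freeing into an isolate step done under the (now sleepable) pa_lock and a bulk-free step done after the lock is dropped. A minimal userspace sketch of that "detach under the lock, do the slow work outside" pattern follows; it is illustrative only, and all names (node, isolate_all, free_outside_lock) are invented for the example.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *next; int val; };

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *shared_head;

	/* Under the lock: only pointer manipulation, O(1) per item. */
	static struct node *isolate_all(void)
	{
		struct node *batch;

		pthread_mutex_lock(&list_lock);
		batch = shared_head;
		shared_head = NULL;
		pthread_mutex_unlock(&list_lock);
		return batch;
	}

	/* Outside the lock: the potentially expensive part. */
	static void free_outside_lock(struct node *batch)
	{
		while (batch) {
			struct node *next = batch->next;
			printf("freeing %d\n", batch->val);
			free(batch);
			batch = next;
		}
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++) {
			struct node *n = malloc(sizeof(*n));
			n->val = i;
			n->next = shared_head;
			shared_head = n;
		}
		free_outside_lock(isolate_all());
		return 0;
	}

This keeps the lock hold time short and bounded, which is what lets the page allocator paths stay preemptible on RT.
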
+diff --git a/mm/slab.h b/mm/slab.h
+index 9653f2e2591a..b7371e026627 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -426,7 +426,11 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+ * The slab lists for all objects.
+ */
+ struct kmem_cache_node {
++#ifdef CONFIG_SLUB
++ raw_spinlock_t list_lock;
++#else
+ spinlock_t list_lock;
++#endif
+
+ #ifdef CONFIG_SLAB
+ struct list_head slabs_partial; /* partial list first, better asm code */
+diff --git a/mm/slub.c b/mm/slub.c
+index 9adae58462f8..4b386747f050 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1145,7 +1145,7 @@ static noinline int free_debug_processing(
+ unsigned long uninitialized_var(flags);
+ int ret = 0;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ slab_lock(page);
+
+ if (s->flags & SLAB_CONSISTENCY_CHECKS) {
+@@ -1180,7 +1180,7 @@ static noinline int free_debug_processing(
+ bulk_cnt, cnt);
+
+ slab_unlock(page);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ if (!ret)
+ slab_fix(s, "Object at 0x%p not freed", object);
+ return ret;
+@@ -1308,6 +1308,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
+
+ #endif /* CONFIG_SLUB_DEBUG */
+
++struct slub_free_list {
++ raw_spinlock_t lock;
++ struct list_head list;
++};
++static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
++
+ /*
+ * Hooks for other subsystems that check memory allocations. In a typical
+ * production configuration these hooks all should produce no code at all.
+@@ -1527,10 +1533,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+ void *start, *p;
+ int idx, order;
+ bool shuffle;
++ bool enableirqs = false;
+
+ flags &= gfp_allowed_mask;
+
+ if (gfpflags_allow_blocking(flags))
++ enableirqs = true;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (system_state == SYSTEM_RUNNING)
++ enableirqs = true;
++#endif
++ if (enableirqs)
+ local_irq_enable();
+
+ flags |= s->allocflags;
+@@ -1605,7 +1618,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+ page->frozen = 1;
+
+ out:
+- if (gfpflags_allow_blocking(flags))
++ if (enableirqs)
+ local_irq_disable();
+ if (!page)
+ return NULL;
+@@ -1664,6 +1677,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
+ __free_pages(page, order);
+ }
+
++static void free_delayed(struct list_head *h)
++{
++ while(!list_empty(h)) {
++ struct page *page = list_first_entry(h, struct page, lru);
++
++ list_del(&page->lru);
++ __free_slab(page->slab_cache, page);
++ }
++}
++
+ #define need_reserve_slab_rcu \
+ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
+@@ -1695,6 +1718,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
+ }
+
+ call_rcu(head, rcu_free_slab);
++ } else if (irqs_disabled()) {
++ struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
++
++ raw_spin_lock(&f->lock);
++ list_add(&page->lru, &f->list);
++ raw_spin_unlock(&f->lock);
+ } else
+ __free_slab(s, page);
+ }
+@@ -1802,7 +1831,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+ if (!n || !n->nr_partial)
+ return NULL;
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ list_for_each_entry_safe(page, page2, &n->partial, lru) {
+ void *t;
+
+@@ -1827,7 +1856,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+ break;
+
+ }
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ return object;
+ }
+
+@@ -2073,7 +2102,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ * that acquire_slab() will see a slab page that
+ * is frozen
+ */
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+ } else {
+ m = M_FULL;
+@@ -2084,7 +2113,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ * slabs from diagnostic functions will not see
+ * any frozen slabs.
+ */
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+ }
+
+@@ -2119,7 +2148,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ goto redo;
+
+ if (lock)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ if (m == M_FREE) {
+ stat(s, DEACTIVATE_EMPTY);
+@@ -2151,10 +2180,10 @@ static void unfreeze_partials(struct kmem_cache *s,
+ n2 = get_node(s, page_to_nid(page));
+ if (n != n2) {
+ if (n)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ n = n2;
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+
+ do {
+@@ -2183,7 +2212,7 @@ static void unfreeze_partials(struct kmem_cache *s,
+ }
+
+ if (n)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ while (discard_page) {
+ page = discard_page;
+@@ -2222,14 +2251,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+ pobjects = oldpage->pobjects;
+ pages = oldpage->pages;
+ if (drain && pobjects > s->cpu_partial) {
++ struct slub_free_list *f;
+ unsigned long flags;
++ LIST_HEAD(tofree);
+ /*
+ * partial array is full. Move the existing
+ * set to the per node partial list.
+ */
+ local_irq_save(flags);
+ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
++ f = this_cpu_ptr(&slub_free_list);
++ raw_spin_lock(&f->lock);
++ list_splice_init(&f->list, &tofree);
++ raw_spin_unlock(&f->lock);
+ local_irq_restore(flags);
++ free_delayed(&tofree);
+ oldpage = NULL;
+ pobjects = 0;
+ pages = 0;
+@@ -2301,7 +2337,22 @@ static bool has_cpu_slab(int cpu, void *info)
+
+ static void flush_all(struct kmem_cache *s)
+ {
++ LIST_HEAD(tofree);
++ int cpu;
++
+ on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
++ for_each_online_cpu(cpu) {
++ struct slub_free_list *f;
++
++ if (!has_cpu_slab(cpu, s))
++ continue;
++
++ f = &per_cpu(slub_free_list, cpu);
++ raw_spin_lock_irq(&f->lock);
++ list_splice_init(&f->list, &tofree);
++ raw_spin_unlock_irq(&f->lock);
++ free_delayed(&tofree);
++ }
+ }
+
+ /*
+@@ -2337,10 +2388,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
+ unsigned long x = 0;
+ struct page *page;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, lru)
+ x += get_count(page);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return x;
+ }
+ #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
+@@ -2478,8 +2529,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+ * already disabled (which is the case for bulk allocation).
+ */
+ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+- unsigned long addr, struct kmem_cache_cpu *c)
++ unsigned long addr, struct kmem_cache_cpu *c,
++ struct list_head *to_free)
+ {
++ struct slub_free_list *f;
+ void *freelist;
+ struct page *page;
+
+@@ -2539,6 +2592,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ VM_BUG_ON(!c->page->frozen);
+ c->freelist = get_freepointer(s, freelist);
+ c->tid = next_tid(c->tid);
++
++out:
++ f = this_cpu_ptr(&slub_free_list);
++ raw_spin_lock(&f->lock);
++ list_splice_init(&f->list, to_free);
++ raw_spin_unlock(&f->lock);
++
+ return freelist;
+
+ new_slab:
+@@ -2570,7 +2630,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ deactivate_slab(s, page, get_freepointer(s, freelist));
+ c->page = NULL;
+ c->freelist = NULL;
+- return freelist;
++ goto out;
+ }
+
+ /*
+@@ -2582,6 +2642,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ {
+ void *p;
+ unsigned long flags;
++ LIST_HEAD(tofree);
+
+ local_irq_save(flags);
+ #ifdef CONFIG_PREEMPT
+@@ -2593,8 +2654,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ c = this_cpu_ptr(s->cpu_slab);
+ #endif
+
+- p = ___slab_alloc(s, gfpflags, node, addr, c);
++ p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
+ local_irq_restore(flags);
++ free_delayed(&tofree);
+ return p;
+ }
+
+@@ -2780,7 +2842,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+
+ do {
+ if (unlikely(n)) {
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ n = NULL;
+ }
+ prior = page->freelist;
+@@ -2812,7 +2874,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+ * Otherwise the list_lock will synchronize with
+ * other processors updating the list of slabs.
+ */
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ }
+ }
+@@ -2854,7 +2916,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+ add_partial(n, page, DEACTIVATE_TO_TAIL);
+ stat(s, FREE_ADD_PARTIAL);
+ }
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return;
+
+ slab_empty:
+@@ -2869,7 +2931,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+ remove_full(s, n, page);
+ }
+
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ stat(s, FREE_SLAB);
+ discard_slab(s, page);
+ }
+@@ -3074,6 +3136,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ void **p)
+ {
+ struct kmem_cache_cpu *c;
++ LIST_HEAD(to_free);
+ int i;
+
+ /* memcg and kmem_cache debug support */
+@@ -3097,7 +3160,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ * of re-populating per CPU c->freelist
+ */
+ p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
+- _RET_IP_, c);
++ _RET_IP_, c, &to_free);
+ if (unlikely(!p[i]))
+ goto error;
+
+@@ -3109,6 +3172,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ }
+ c->tid = next_tid(c->tid);
+ local_irq_enable();
++ free_delayed(&to_free);
+
+ /* Clear memory outside IRQ disabled fastpath loop */
+ if (unlikely(flags & __GFP_ZERO)) {
+@@ -3256,7 +3320,7 @@ static void
+ init_kmem_cache_node(struct kmem_cache_node *n)
+ {
+ n->nr_partial = 0;
+- spin_lock_init(&n->list_lock);
++ raw_spin_lock_init(&n->list_lock);
+ INIT_LIST_HEAD(&n->partial);
+ #ifdef CONFIG_SLUB_DEBUG
+ atomic_long_set(&n->nr_slabs, 0);
+@@ -3600,6 +3664,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
+ const char *text)
+ {
+ #ifdef CONFIG_SLUB_DEBUG
++#ifdef CONFIG_PREEMPT_RT_BASE
++ /* XXX move out of irq-off section */
++ slab_err(s, page, text, s->name);
++#else
+ void *addr = page_address(page);
+ void *p;
+ unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
+@@ -3620,6 +3688,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
+ slab_unlock(page);
+ kfree(map);
+ #endif
++#endif
+ }
+
+ /*
+@@ -3633,7 +3702,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+ struct page *page, *h;
+
+ BUG_ON(irqs_disabled());
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ list_for_each_entry_safe(page, h, &n->partial, lru) {
+ if (!page->inuse) {
+ remove_partial(n, page);
+@@ -3643,7 +3712,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+ "Objects remaining in %s on __kmem_cache_shutdown()");
+ }
+ }
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ list_for_each_entry_safe(page, h, &discard, lru)
+ discard_slab(s, page);
+@@ -3901,7 +3970,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+ for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
+ INIT_LIST_HEAD(promote + i);
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ /*
+ * Build lists of slabs to discard or promote.
+@@ -3932,7 +4001,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+ for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
+ list_splice(promote + i, &n->partial);
+
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+
+ /* Release empty slabs */
+ list_for_each_entry_safe(page, t, &discard, lru)
+@@ -4108,6 +4177,12 @@ void __init kmem_cache_init(void)
+ {
+ static __initdata struct kmem_cache boot_kmem_cache,
+ boot_kmem_cache_node;
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
++ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
++ }
+
+ if (debug_guardpage_minorder())
+ slub_max_order = 0;
+@@ -4354,7 +4429,7 @@ static int validate_slab_node(struct kmem_cache *s,
+ struct page *page;
+ unsigned long flags;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ list_for_each_entry(page, &n->partial, lru) {
+ validate_slab_slab(s, page, map);
+@@ -4376,7 +4451,7 @@ static int validate_slab_node(struct kmem_cache *s,
+ s->name, count, atomic_long_read(&n->nr_slabs));
+
+ out:
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return count;
+ }
+
+@@ -4564,12 +4639,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
+ if (!atomic_long_read(&n->nr_slabs))
+ continue;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, lru)
+ process_slab(&t, s, page, alloc, map);
+ list_for_each_entry(page, &n->full, lru)
+ process_slab(&t, s, page, alloc, map);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ }
+
+ for (i = 0; i < t.count; i++) {
+diff --git a/mm/swap.c b/mm/swap.c
+index 75c63bb2a1da..93fe549eb11e 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -32,6 +32,7 @@
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
+ #include <linux/uio.h>
++#include <linux/locallock.h>
+ #include <linux/hugetlb.h>
+ #include <linux/page_idle.h>
+
+@@ -50,6 +51,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+ #ifdef CONFIG_SMP
+ static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+ #endif
++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
++DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
+
+ /*
+ * This path almost never happens for VM activity - pages are normally
+@@ -240,11 +243,11 @@ void rotate_reclaimable_page(struct page *page)
+ unsigned long flags;
+
+ get_page(page);
+- local_irq_save(flags);
++ local_lock_irqsave(rotate_lock, flags);
+ pvec = this_cpu_ptr(&lru_rotate_pvecs);
+ if (!pagevec_add(pvec, page) || PageCompound(page))
+ pagevec_move_tail(pvec);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(rotate_lock, flags);
+ }
+ }
+
+@@ -294,12 +297,13 @@ void activate_page(struct page *page)
+ {
+ page = compound_head(page);
+ if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
++ struct pagevec *pvec = &get_locked_var(swapvec_lock,
++ activate_page_pvecs);
+
+ get_page(page);
+ if (!pagevec_add(pvec, page) || PageCompound(page))
+ pagevec_lru_move_fn(pvec, __activate_page, NULL);
+- put_cpu_var(activate_page_pvecs);
++ put_locked_var(swapvec_lock, activate_page_pvecs);
+ }
+ }
+
+@@ -326,7 +330,7 @@ void activate_page(struct page *page)
+
+ static void __lru_cache_activate_page(struct page *page)
+ {
+- struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+ int i;
+
+ /*
+@@ -348,7 +352,7 @@ static void __lru_cache_activate_page(struct page *page)
+ }
+ }
+
+- put_cpu_var(lru_add_pvec);
++ put_locked_var(swapvec_lock, lru_add_pvec);
+ }
+
+ /*
+@@ -390,12 +394,12 @@ EXPORT_SYMBOL(mark_page_accessed);
+
+ static void __lru_cache_add(struct page *page)
+ {
+- struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+
+ get_page(page);
+ if (!pagevec_add(pvec, page) || PageCompound(page))
+ __pagevec_lru_add(pvec);
+- put_cpu_var(lru_add_pvec);
++ put_locked_var(swapvec_lock, lru_add_pvec);
+ }
+
+ /**
+@@ -593,9 +597,15 @@ void lru_add_drain_cpu(int cpu)
+ unsigned long flags;
+
+ /* No harm done if a racing interrupt already did this */
+- local_irq_save(flags);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ local_lock_irqsave_on(rotate_lock, flags, cpu);
+ pagevec_move_tail(pvec);
+- local_irq_restore(flags);
++ local_unlock_irqrestore_on(rotate_lock, flags, cpu);
++#else
++ local_lock_irqsave(rotate_lock, flags);
++ pagevec_move_tail(pvec);
++ local_unlock_irqrestore(rotate_lock, flags);
++#endif
+ }
+
+ pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
+@@ -627,11 +637,12 @@ void deactivate_file_page(struct page *page)
+ return;
+
+ if (likely(get_page_unless_zero(page))) {
+- struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
++ struct pagevec *pvec = &get_locked_var(swapvec_lock,
++ lru_deactivate_file_pvecs);
+
+ if (!pagevec_add(pvec, page) || PageCompound(page))
+ pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+- put_cpu_var(lru_deactivate_file_pvecs);
++ put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
+ }
+ }
+
+@@ -646,27 +657,31 @@ void deactivate_file_page(struct page *page)
+ void deactivate_page(struct page *page)
+ {
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
++ struct pagevec *pvec = &get_locked_var(swapvec_lock,
++ lru_deactivate_pvecs);
+
+ get_page(page);
+ if (!pagevec_add(pvec, page) || PageCompound(page))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+- put_cpu_var(lru_deactivate_pvecs);
++ put_locked_var(swapvec_lock, lru_deactivate_pvecs);
+ }
+ }
+
+ void lru_add_drain(void)
+ {
+- lru_add_drain_cpu(get_cpu());
+- put_cpu();
++ lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
++ local_unlock_cpu(swapvec_lock);
+ }
+
+-static void lru_add_drain_per_cpu(struct work_struct *dummy)
++#ifdef CONFIG_PREEMPT_RT_BASE
++static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+ {
+- lru_add_drain();
++ local_lock_on(swapvec_lock, cpu);
++ lru_add_drain_cpu(cpu);
++ local_unlock_on(swapvec_lock, cpu);
+ }
+
+-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
++#else
+
+ /*
+ * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+@@ -686,6 +701,22 @@ static int __init lru_init(void)
+ }
+ early_initcall(lru_init);
+
++static void lru_add_drain_per_cpu(struct work_struct *dummy)
++{
++ lru_add_drain();
++}
++
++static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
++static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
++{
++ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
++
++ INIT_WORK(work, lru_add_drain_per_cpu);
++ queue_work_on(cpu, lru_add_drain_wq, work);
++ cpumask_set_cpu(cpu, has_work);
++}
++#endif
++
+ void lru_add_drain_all(void)
+ {
+ static DEFINE_MUTEX(lock);
+@@ -697,21 +728,18 @@ void lru_add_drain_all(void)
+ cpumask_clear(&has_work);
+
+ for_each_online_cpu(cpu) {
+- struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+-
+ if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
+ pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
+ pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
+ pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+- need_activate_page_drain(cpu)) {
+- INIT_WORK(work, lru_add_drain_per_cpu);
+- queue_work_on(cpu, lru_add_drain_wq, work);
+- cpumask_set_cpu(cpu, &has_work);
+- }
++ need_activate_page_drain(cpu))
++ remote_lru_add_drain(cpu, &has_work);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_BASE
+ for_each_cpu(cpu, &has_work)
+ flush_work(&per_cpu(lru_add_drain_work, cpu));
++#endif
+
+ put_online_cpus();
+ mutex_unlock(&lock);
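
Note on the mm/swap.c hunks above: get_cpu_var() protects per-CPU pagevecs by disabling preemption, which RT avoids; get_locked_var() instead takes a local lock that serializes access to the per-CPU slot while leaving the section preemptible. A rough userspace analogue, with a lock per slot standing in for the per-CPU local lock (names and slot count are invented for the example):

	#include <pthread.h>
	#include <stdio.h>

	#define NR_SLOTS 2

	struct pagevec { int count; };

	static struct pagevec pvecs[NR_SLOTS];       /* stand-in for per-CPU data */
	static pthread_mutex_t pvec_lock[NR_SLOTS] = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	};

	static struct pagevec *get_locked_pvec(int slot)
	{
		pthread_mutex_lock(&pvec_lock[slot]);    /* serializes, may sleep */
		return &pvecs[slot];
	}

	static void put_locked_pvec(int slot)
	{
		pthread_mutex_unlock(&pvec_lock[slot]);
	}

	int main(void)
	{
		struct pagevec *pvec = get_locked_pvec(0);

		pvec->count++;                           /* safe: slot 0 is locked */
		put_locked_pvec(0);
		printf("count=%d\n", pvecs[0].count);
		return 0;
	}
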
+diff --git a/mm/truncate.c b/mm/truncate.c
+index a01cce450a26..4bda37604f99 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -62,9 +62,12 @@ static void clear_exceptional_entry(struct address_space *mapping,
+ * protected by mapping->tree_lock.
+ */
+ if (!workingset_node_shadows(node) &&
+- !list_empty(&node->private_list))
+- list_lru_del(&workingset_shadow_nodes,
++ !list_empty(&node->private_list)) {
++ local_lock(workingset_shadow_lock);
++ list_lru_del(&__workingset_shadow_nodes,
+ &node->private_list);
++ local_unlock(workingset_shadow_lock);
++ }
+ __radix_tree_delete_node(&mapping->page_tree, node);
+ unlock:
+ spin_unlock_irq(&mapping->tree_lock);
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 91f44e78c516..06ec393bb97d 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -845,7 +845,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ struct vmap_block *vb;
+ struct vmap_area *va;
+ unsigned long vb_idx;
+- int node, err;
++ int node, err, cpu;
+ void *vaddr;
+
+ node = numa_node_id();
+@@ -888,11 +888,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ BUG_ON(err);
+ radix_tree_preload_end();
+
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = this_cpu_ptr(&vmap_block_queue);
+ spin_lock(&vbq->lock);
+ list_add_tail_rcu(&vb->free_list, &vbq->free);
+ spin_unlock(&vbq->lock);
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+
+ return vaddr;
+ }
+@@ -961,6 +962,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ struct vmap_block *vb;
+ void *vaddr = NULL;
+ unsigned int order;
++ int cpu;
+
+ BUG_ON(offset_in_page(size));
+ BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+@@ -975,7 +977,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ order = get_order(size);
+
+ rcu_read_lock();
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = this_cpu_ptr(&vmap_block_queue);
+ list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+ unsigned long pages_off;
+
+@@ -998,7 +1001,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ break;
+ }
+
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+ rcu_read_unlock();
+
+ /* Allocate new block if nothing was found */
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 89cec42d19ff..fb73631fb90b 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
+ long x;
+ long t;
+
++ preempt_disable_rt();
+ x = delta + __this_cpu_read(*p);
+
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -254,6 +255,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
+ x = 0;
+ }
+ __this_cpu_write(*p, x);
++ preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_zone_page_state);
+
+@@ -265,6 +267,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
+ long x;
+ long t;
+
++ preempt_disable_rt();
+ x = delta + __this_cpu_read(*p);
+
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -274,6 +277,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
+ x = 0;
+ }
+ __this_cpu_write(*p, x);
++ preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_node_page_state);
+
+@@ -306,6 +310,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ s8 __percpu *p = pcp->vm_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_inc_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v > t)) {
+@@ -314,6 +319,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ zone_page_state_add(v + overstep, zone, item);
+ __this_cpu_write(*p, -overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -322,6 +328,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_inc_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v > t)) {
+@@ -330,6 +337,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ node_page_state_add(v + overstep, pgdat, item);
+ __this_cpu_write(*p, -overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+@@ -350,6 +358,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ s8 __percpu *p = pcp->vm_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_dec_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v < - t)) {
+@@ -358,6 +367,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ zone_page_state_add(v - overstep, zone, item);
+ __this_cpu_write(*p, overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -366,6 +376,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ s8 __percpu *p = pcp->vm_node_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_dec_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v < - t)) {
+@@ -374,6 +385,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ node_page_state_add(v - overstep, pgdat, item);
+ __this_cpu_write(*p, overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
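
Note on the mm/vmstat.c hunks above: each counter update batches into a per-CPU diff and folds into the global counter only when a threshold is crossed; the read-modify-write of the diff is only safe if the task cannot be preempted mid-update, which is why RT brackets it with preempt_disable_rt()/preempt_enable_rt(). The sketch below loosely mirrors that scheme with per-thread batching and a protected fold; names and the threshold value are illustrative.

	#include <pthread.h>
	#include <stdio.h>

	static long global_count;
	static pthread_mutex_t fold_lock = PTHREAD_MUTEX_INITIALIZER;
	static __thread long diff;           /* per-thread, like vm_stat_diff */
	#define STAT_THRESHOLD 32

	static void count_event(void)
	{
		if (++diff > STAT_THRESHOLD) {   /* fold the batch */
			pthread_mutex_lock(&fold_lock);
			global_count += diff;
			pthread_mutex_unlock(&fold_lock);
			diff = 0;
		}
	}

	static void *worker(void *arg)
	{
		for (int i = 0; i < 1000; i++)
			count_event();
		/* final fold so nothing stays behind in the thread */
		pthread_mutex_lock(&fold_lock);
		global_count += diff;
		pthread_mutex_unlock(&fold_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[4];

		for (int i = 0; i < 4; i++)
			pthread_create(&t[i], NULL, worker, NULL);
		for (int i = 0; i < 4; i++)
			pthread_join(t[i], NULL);
		printf("count = %ld\n", global_count);  /* prints 4000 */
		return 0;
	}
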
+diff --git a/mm/workingset.c b/mm/workingset.c
+index 617475f529f4..48674bf36fb1 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -334,7 +334,8 @@ void workingset_activation(struct page *page)
+ * point where they would still be useful.
+ */
+
+-struct list_lru workingset_shadow_nodes;
++struct list_lru __workingset_shadow_nodes;
++DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
+
+ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
+ struct shrink_control *sc)
+@@ -344,9 +345,9 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
+ unsigned long pages;
+
+ /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+- local_irq_disable();
+- shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
+- local_irq_enable();
++ local_lock_irq(workingset_shadow_lock);
++ shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc);
++ local_unlock_irq(workingset_shadow_lock);
+
+ if (memcg_kmem_enabled()) {
+ pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
+@@ -438,9 +439,9 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
+ spin_unlock(&mapping->tree_lock);
+ ret = LRU_REMOVED_RETRY;
+ out:
+- local_irq_enable();
++ local_unlock_irq(workingset_shadow_lock);
+ cond_resched();
+- local_irq_disable();
++ local_lock_irq(workingset_shadow_lock);
+ spin_lock(lru_lock);
+ return ret;
+ }
+@@ -451,10 +452,10 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
+ unsigned long ret;
+
+ /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+- local_irq_disable();
+- ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
++ local_lock_irq(workingset_shadow_lock);
++ ret = list_lru_shrink_walk(&__workingset_shadow_nodes, sc,
+ shadow_lru_isolate, NULL);
+- local_irq_enable();
++ local_unlock_irq(workingset_shadow_lock);
+ return ret;
+ }
+
+@@ -492,7 +493,7 @@ static int __init workingset_init(void)
+ pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+ timestamp_bits, max_order, bucket_order);
+
+- ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
++ ret = list_lru_init_key(&__workingset_shadow_nodes, &shadow_nodes_key);
+ if (ret)
+ goto err;
+ ret = register_shrinker(&workingset_shadow_shrinker);
+@@ -500,7 +501,7 @@ static int __init workingset_init(void)
+ goto err_list_lru;
+ return 0;
+ err_list_lru:
+- list_lru_destroy(&workingset_shadow_nodes);
++ list_lru_destroy(&__workingset_shadow_nodes);
+ err:
+ return ret;
+ }
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index b0bc023d25c5..5af6426fbcbe 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -53,6 +53,7 @@
+ #include <linux/mount.h>
+ #include <linux/migrate.h>
+ #include <linux/pagemap.h>
++#include <linux/locallock.h>
+
+ #define ZSPAGE_MAGIC 0x58
+
+@@ -70,9 +71,22 @@
+ */
+ #define ZS_MAX_ZSPAGE_ORDER 2
+ #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+-
+ #define ZS_HANDLE_SIZE (sizeof(unsigned long))
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++struct zsmalloc_handle {
++ unsigned long addr;
++ struct mutex lock;
++};
++
++#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
++
++#else
++
++#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
++#endif
++
+ /*
+ * Object location (<PFN>, <obj_idx>) is encoded as
+ * as single (unsigned long) handle value.
+@@ -327,7 +341,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
+
+ static int create_cache(struct zs_pool *pool)
+ {
+- pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
++ pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
+ 0, 0, NULL);
+ if (!pool->handle_cachep)
+ return 1;
+@@ -351,10 +365,27 @@ static void destroy_cache(struct zs_pool *pool)
+
+ static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
+ {
+- return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
+- gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
++ void *p;
++
++ p = kmem_cache_alloc(pool->handle_cachep,
++ gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (p) {
++ struct zsmalloc_handle *zh = p;
++
++ mutex_init(&zh->lock);
++ }
++#endif
++ return (unsigned long)p;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
++{
++ return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1));
++}
++#endif
++
+ static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
+ {
+ kmem_cache_free(pool->handle_cachep, (void *)handle);
+@@ -373,12 +404,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
+
+ static void record_obj(unsigned long handle, unsigned long obj)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ WRITE_ONCE(zh->addr, obj);
++#else
+ /*
+ * lsb of @obj represents handle lock while other bits
+ * represent object value the handle is pointing so
+ * updating shouldn't do store tearing.
+ */
+ WRITE_ONCE(*(unsigned long *)handle, obj);
++#endif
+ }
+
+ /* zpool driver */
+@@ -467,6 +504,7 @@ MODULE_ALIAS("zpool-zsmalloc");
+
+ /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
+ static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
++static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);
+
+ static bool is_zspage_isolated(struct zspage *zspage)
+ {
+@@ -902,7 +940,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
+
+ static unsigned long handle_to_obj(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return zh->addr;
++#else
+ return *(unsigned long *)handle;
++#endif
+ }
+
+ static unsigned long obj_to_head(struct page *page, void *obj)
+@@ -916,22 +960,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)
+
+ static inline int testpin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_is_locked(&zh->lock);
++#else
+ return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static inline int trypin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_trylock(&zh->lock);
++#else
+ return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static void pin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_lock(&zh->lock);
++#else
+ bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static void unpin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_unlock(&zh->lock);
++#else
+ bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static void reset_page(struct page *page)
+@@ -1423,7 +1491,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ class = pool->size_class[class_idx];
+ off = (class->size * obj_idx) & ~PAGE_MASK;
+
+- area = &get_cpu_var(zs_map_area);
++ area = &get_locked_var(zs_map_area_lock, zs_map_area);
+ area->vm_mm = mm;
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+@@ -1477,7 +1545,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
+
+ __zs_unmap_object(area, pages, off, class->size);
+ }
+- put_cpu_var(zs_map_area);
++ put_locked_var(zs_map_area_lock, zs_map_area);
+
+ migrate_read_unlock(zspage);
+ unpin_tag(handle);
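
Note on the mm/zsmalloc.c hunks above: mainline packs a bit-spinlock into the handle word itself (HANDLE_PIN_BIT), which cannot sleep; on RT the handle is enlarged to a structure carrying a real mutex next to the value. A minimal sketch of that trade, with invented names:

	#include <pthread.h>
	#include <stdio.h>

	struct handle {
		unsigned long addr;      /* the payload formerly stored alone */
		pthread_mutex_t lock;    /* replaces the "pin" bit in addr */
	};

	static void pin(struct handle *h)   { pthread_mutex_lock(&h->lock); }
	static void unpin(struct handle *h) { pthread_mutex_unlock(&h->lock); }

	int main(void)
	{
		struct handle h = { .addr = 0xdeadbeef,
				    .lock = PTHREAD_MUTEX_INITIALIZER };

		pin(&h);
		printf("obj = %#lx\n", h.addr);
		unpin(&h);
		return 0;
	}

The cost is a larger handle allocation (ZS_HANDLE_ALLOC_SIZE in the patch); the gain is that pinning a handle becomes a sleepable operation.
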
+diff --git a/net/core/dev.c b/net/core/dev.c
+index ea6312057a71..d114a4692cde 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPUS;
+ static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
+
+ static seqcount_t devnet_rename_seq;
++static DEFINE_MUTEX(devnet_rename_mutex);
+
+ static inline void dev_base_seq_inc(struct net *net)
+ {
+@@ -211,14 +212,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
+ static inline void rps_lock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_lock(&sd->input_pkt_queue.lock);
++ raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+ static inline void rps_unlock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_unlock(&sd->input_pkt_queue.lock);
++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+@@ -888,7 +889,8 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
+ strcpy(name, dev->name);
+ rcu_read_unlock();
+ if (read_seqcount_retry(&devnet_rename_seq, seq)) {
+- cond_resched();
++ mutex_lock(&devnet_rename_mutex);
++ mutex_unlock(&devnet_rename_mutex);
+ goto retry;
+ }
+
+@@ -1157,20 +1159,17 @@ int dev_change_name(struct net_device *dev, const char *newname)
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+- write_seqcount_begin(&devnet_rename_seq);
++ mutex_lock(&devnet_rename_mutex);
++ __raw_write_seqcount_begin(&devnet_rename_seq);
+
+- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
+- write_seqcount_end(&devnet_rename_seq);
+- return 0;
+- }
++ if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
++ goto outunlock;
+
+ memcpy(oldname, dev->name, IFNAMSIZ);
+
+ err = dev_get_valid_name(net, dev, newname);
+- if (err < 0) {
+- write_seqcount_end(&devnet_rename_seq);
+- return err;
+- }
++ if (err < 0)
++ goto outunlock;
+
+ if (oldname[0] && !strchr(oldname, '%'))
+ netdev_info(dev, "renamed from %s\n", oldname);
+@@ -1183,11 +1182,12 @@ int dev_change_name(struct net_device *dev, const char *newname)
+ if (ret) {
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ dev->name_assign_type = old_assign_type;
+- write_seqcount_end(&devnet_rename_seq);
+- return ret;
++ err = ret;
++ goto outunlock;
+ }
+
+- write_seqcount_end(&devnet_rename_seq);
++ __raw_write_seqcount_end(&devnet_rename_seq);
++ mutex_unlock(&devnet_rename_mutex);
+
+ netdev_adjacent_rename_links(dev, oldname);
+
+@@ -1208,7 +1208,8 @@ int dev_change_name(struct net_device *dev, const char *newname)
+ /* err >= 0 after dev_alloc_name() or stores the first errno */
+ if (err >= 0) {
+ err = ret;
+- write_seqcount_begin(&devnet_rename_seq);
++ mutex_lock(&devnet_rename_mutex);
++ __raw_write_seqcount_begin(&devnet_rename_seq);
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ memcpy(oldname, newname, IFNAMSIZ);
+ dev->name_assign_type = old_assign_type;
+@@ -1221,6 +1222,11 @@ int dev_change_name(struct net_device *dev, const char *newname)
+ }
+
+ return err;
++
++outunlock:
++ __raw_write_seqcount_end(&devnet_rename_seq);
++ mutex_unlock(&devnet_rename_mutex);
++ return err;
+ }
+
+ /**
+@@ -2268,6 +2274,7 @@ static void __netif_reschedule(struct Qdisc *q)
+ sd->output_queue_tailp = &q->next_sched;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ void __netif_schedule(struct Qdisc *q)
+@@ -2349,6 +2356,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+ __this_cpu_write(softnet_data.completion_queue, skb);
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__dev_kfree_skb_irq);
+
+@@ -3082,7 +3090,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+ * This permits qdisc->running owner to get the lock more
+ * often and dequeue packets faster.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ contended = true;
++#else
+ contended = qdisc_is_running(q);
++#endif
+ if (unlikely(contended))
+ spin_lock(&q->busylock);
+
+@@ -3145,8 +3157,10 @@ static void skb_update_prio(struct sk_buff *skb)
+ #define skb_update_prio(skb)
+ #endif
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ DEFINE_PER_CPU(int, xmit_recursion);
+ EXPORT_SYMBOL(xmit_recursion);
++#endif
+
+ /**
+ * dev_loopback_xmit - loop back @skb
+@@ -3390,8 +3404,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+ int cpu = smp_processor_id(); /* ok because BHs are off */
+
+ if (txq->xmit_lock_owner != cpu) {
+- if (unlikely(__this_cpu_read(xmit_recursion) >
+- XMIT_RECURSION_LIMIT))
++ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
+ goto recursion_alert;
+
+ skb = validate_xmit_skb(skb, dev);
+@@ -3401,9 +3414,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+ HARD_TX_LOCK(dev, txq, cpu);
+
+ if (!netif_xmit_stopped(txq)) {
+- __this_cpu_inc(xmit_recursion);
++ xmit_rec_inc();
+ skb = dev_hard_start_xmit(skb, dev, txq, &rc);
+- __this_cpu_dec(xmit_recursion);
++ xmit_rec_dec();
+ if (dev_xmit_complete(rc)) {
+ HARD_TX_UNLOCK(dev, txq);
+ goto out;
+@@ -3777,6 +3790,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ rps_unlock(sd);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+
+ atomic_long_inc(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+@@ -3795,7 +3809,7 @@ static int netif_rx_internal(struct sk_buff *skb)
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu;
+
+- preempt_disable();
++ migrate_disable();
+ rcu_read_lock();
+
+ cpu = get_rps_cpu(skb->dev, skb, &rflow);
+@@ -3805,13 +3819,13 @@ static int netif_rx_internal(struct sk_buff *skb)
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+
+ rcu_read_unlock();
+- preempt_enable();
++ migrate_enable();
+ } else
+ #endif
+ {
+ unsigned int qtail;
+- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+- put_cpu();
++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
++ put_cpu_light();
+ }
+ return ret;
+ }
+@@ -3845,11 +3859,9 @@ int netif_rx_ni(struct sk_buff *skb)
+
+ trace_netif_rx_ni_entry(skb);
+
+- preempt_disable();
++ local_bh_disable();
+ err = netif_rx_internal(skb);
+- if (local_softirq_pending())
+- do_softirq();
+- preempt_enable();
++ local_bh_enable();
+
+ return err;
+ }
+@@ -4321,7 +4333,7 @@ static void flush_backlog(void *arg)
+ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &sd->input_pkt_queue);
+- kfree_skb(skb);
++ __skb_queue_tail(&sd->tofree_queue, skb);
+ input_queue_head_incr(sd);
+ }
+ }
+@@ -4330,10 +4342,13 @@ static void flush_backlog(void *arg)
+ skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &sd->process_queue);
+- kfree_skb(skb);
++ __skb_queue_tail(&sd->tofree_queue, skb);
+ input_queue_head_incr(sd);
+ }
+ }
++
++ if (!skb_queue_empty(&sd->tofree_queue))
++ raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ }
+
+ static int napi_gro_complete(struct sk_buff *skb)
+@@ -4795,6 +4810,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+ sd->rps_ipi_list = NULL;
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Send pending IPI's to kick RPS processing on remote cpus. */
+ while (remsd) {
+@@ -4808,6 +4824,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+ } else
+ #endif
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
+@@ -4889,6 +4906,7 @@ void __napi_schedule(struct napi_struct *n)
+ local_irq_save(flags);
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__napi_schedule);
+
+@@ -5229,7 +5247,7 @@ static void net_rx_action(struct softirq_action *h)
+ list_splice_tail(&repoll, &list);
+ list_splice(&list, &sd->poll_list);
+ if (!list_empty(&sd->poll_list))
+- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
++ __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
+
+ net_rps_action_and_irq_enable(sd);
+ }
+@@ -7736,7 +7754,7 @@ EXPORT_SYMBOL(free_netdev);
+ void synchronize_net(void)
+ {
+ might_sleep();
+- if (rtnl_is_locked())
++ if (rtnl_is_locked() && !IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
+ synchronize_rcu_expedited();
+ else
+ synchronize_rcu();
+@@ -7977,16 +7995,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
+
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Process offline CPU's input_pkt_queue */
+ while ((skb = __skb_dequeue(&oldsd->process_queue))) {
+ netif_rx_ni(skb);
+ input_queue_head_incr(oldsd);
+ }
+- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
++ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+ netif_rx_ni(skb);
+ input_queue_head_incr(oldsd);
+ }
++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
++ kfree_skb(skb);
++ }
+
+ return NOTIFY_OK;
+ }
+@@ -8288,8 +8310,9 @@ static int __init net_dev_init(void)
+ for_each_possible_cpu(i) {
+ struct softnet_data *sd = &per_cpu(softnet_data, i);
+
+- skb_queue_head_init(&sd->input_pkt_queue);
+- skb_queue_head_init(&sd->process_queue);
++ skb_queue_head_init_raw(&sd->input_pkt_queue);
++ skb_queue_head_init_raw(&sd->process_queue);
++ skb_queue_head_init_raw(&sd->tofree_queue);
+ INIT_LIST_HEAD(&sd->poll_list);
+ sd->output_queue_tailp = &sd->output_queue;
+ #ifdef CONFIG_RPS
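
Note on the devnet_rename_seq hunks above: the seqcount's write side is wrapped in a mutex, so a reader that catches a retry can block on that mutex until the writer finishes instead of spinning (the old cond_resched() loop). A minimal single-value sketch of the pattern; all names are illustrative, and `value` is atomic only to keep the sketch free of data races.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint seq;          /* even: stable, odd: write in progress */
	static pthread_mutex_t write_mutex = PTHREAD_MUTEX_INITIALIZER;
	static atomic_int value;

	static void write_value(int v)
	{
		pthread_mutex_lock(&write_mutex);
		atomic_fetch_add(&seq, 1);                       /* -> odd */
		atomic_store_explicit(&value, v, memory_order_relaxed);
		atomic_fetch_add(&seq, 1);                       /* -> even */
		pthread_mutex_unlock(&write_mutex);
	}

	static int read_value(void)
	{
		for (;;) {
			unsigned int s = atomic_load(&seq);
			if (s & 1) {
				/* writer active: sleep on the mutex, then retry */
				pthread_mutex_lock(&write_mutex);
				pthread_mutex_unlock(&write_mutex);
				continue;
			}
			int v = atomic_load_explicit(&value, memory_order_relaxed);
			if (atomic_load(&seq) == s)
				return v;
		}
	}

	int main(void)
	{
		write_value(42);
		printf("%d\n", read_value());
		return 0;
	}
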
+diff --git a/net/core/filter.c b/net/core/filter.c
+index cb06aceb512a..3585a8982287 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1592,7 +1592,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
+ {
+ int ret;
+
+- if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
++ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
+ net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+ kfree_skb(skb);
+ return -ENETDOWN;
+@@ -1600,9 +1600,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
+
+ skb->dev = dev;
+
+- __this_cpu_inc(xmit_recursion);
++ xmit_rec_inc();
+ ret = dev_queue_xmit(skb);
+- __this_cpu_dec(xmit_recursion);
++ xmit_rec_dec();
+
+ return ret;
+ }
+diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
+index cad8e791f28e..2a9364fe62a5 100644
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -84,7 +84,7 @@ struct gen_estimator
+ struct gnet_stats_basic_packed *bstats;
+ struct gnet_stats_rate_est64 *rate_est;
+ spinlock_t *stats_lock;
+- seqcount_t *running;
++ net_seqlock_t *running;
+ int ewma_log;
+ u32 last_packets;
+ unsigned long avpps;
+@@ -213,7 +213,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock,
+- seqcount_t *running,
++ net_seqlock_t *running,
+ struct nlattr *opt)
+ {
+ struct gen_estimator *est;
+@@ -309,7 +309,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct gnet_stats_rate_est64 *rate_est,
+ spinlock_t *stats_lock,
+- seqcount_t *running, struct nlattr *opt)
++ net_seqlock_t *running, struct nlattr *opt)
+ {
+ gen_kill_estimator(bstats, rate_est);
+ return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
+diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
+index 508e051304fb..bc3b17b78c94 100644
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -130,7 +130,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
+ }
+
+ void
+-__gnet_stats_copy_basic(const seqcount_t *running,
++__gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b)
+@@ -143,10 +143,10 @@ __gnet_stats_copy_basic(const seqcount_t *running,
+ }
+ do {
+ if (running)
+- seq = read_seqcount_begin(running);
++ seq = net_seq_begin(running);
+ bstats->bytes = b->bytes;
+ bstats->packets = b->packets;
+- } while (running && read_seqcount_retry(running, seq));
++ } while (running && net_seq_retry(running, seq));
+ }
+ EXPORT_SYMBOL(__gnet_stats_copy_basic);
+
+@@ -164,7 +164,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
+ * if the room in the socket buffer was not sufficient.
+ */
+ int
+-gnet_stats_copy_basic(const seqcount_t *running,
++gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 3864b4b68fa1..55c73ade9faa 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -64,6 +64,7 @@
+ #include <linux/errqueue.h>
+ #include <linux/prefetch.h>
+ #include <linux/if_vlan.h>
++#include <linux/locallock.h>
+
+ #include <net/protocol.h>
+ #include <net/dst.h>
+@@ -360,6 +361,8 @@ struct napi_alloc_cache {
+
+ static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+ static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
++static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
++static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
+
+ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+ {
+@@ -367,10 +370,10 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+ unsigned long flags;
+ void *data;
+
+- local_irq_save(flags);
++ local_lock_irqsave(netdev_alloc_lock, flags);
+ nc = this_cpu_ptr(&netdev_alloc_cache);
+ data = __alloc_page_frag(nc, fragsz, gfp_mask);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(netdev_alloc_lock, flags);
+ return data;
+ }
+
+@@ -389,9 +392,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
+
+ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+ {
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
++ void *data;
+
+- return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
++ data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
++ return data;
+ }
+
+ void *napi_alloc_frag(unsigned int fragsz)
+@@ -438,13 +445,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
+ if (sk_memalloc_socks())
+ gfp_mask |= __GFP_MEMALLOC;
+
+- local_irq_save(flags);
++ local_lock_irqsave(netdev_alloc_lock, flags);
+
+ nc = this_cpu_ptr(&netdev_alloc_cache);
+ data = __alloc_page_frag(nc, len, gfp_mask);
+ pfmemalloc = nc->pfmemalloc;
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(netdev_alloc_lock, flags);
+
+ if (unlikely(!data))
+ return NULL;
+@@ -485,9 +492,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
+ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+ gfp_t gfp_mask)
+ {
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
+ struct sk_buff *skb;
+ void *data;
++ bool pfmemalloc;
+
+ len += NET_SKB_PAD + NET_IP_ALIGN;
+
+@@ -505,7 +513,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+ if (sk_memalloc_socks())
+ gfp_mask |= __GFP_MEMALLOC;
+
++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ data = __alloc_page_frag(&nc->page, len, gfp_mask);
++ pfmemalloc = nc->page.pfmemalloc;
++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ if (unlikely(!data))
+ return NULL;
+
+@@ -516,7 +527,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+ }
+
+ /* use OR instead of assignment to avoid clearing of bits in mask */
+- if (nc->page.pfmemalloc)
++ if (pfmemalloc)
+ skb->pfmemalloc = 1;
+ skb->head_frag = 1;
+
+@@ -760,23 +771,26 @@ EXPORT_SYMBOL(consume_skb);
+
+ void __kfree_skb_flush(void)
+ {
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
+
++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ /* flush skb_cache if containing objects */
+ if (nc->skb_count) {
+ kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
+ nc->skb_cache);
+ nc->skb_count = 0;
+ }
++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ }
+
+ static inline void _kfree_skb_defer(struct sk_buff *skb)
+ {
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
+
+ /* drop skb->head and call any destructors for packet */
+ skb_release_all(skb);
+
++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ /* record skb to CPU local list */
+ nc->skb_cache[nc->skb_count++] = skb;
+
+@@ -791,6 +805,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
+ nc->skb_cache);
+ nc->skb_count = 0;
+ }
++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ }
+ void __kfree_skb_defer(struct sk_buff *skb)
+ {
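
Note on the __napi_alloc_skb hunk above: once the locked section ends, the per-CPU cache may be reused by another task, so the pfmemalloc flag is copied out while the lock is still held rather than read afterwards. A tiny sketch of that "snapshot under the lock" rule, with invented names:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct frag_cache { void *page; bool pfmemalloc; };

	static struct frag_cache cache = { .pfmemalloc = true };
	static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

	int main(void)
	{
		bool pfmemalloc;

		pthread_mutex_lock(&cache_lock);
		/* ... allocate from cache ... */
		pfmemalloc = cache.pfmemalloc;   /* snapshot under the lock */
		pthread_mutex_unlock(&cache_lock);

		/* safe: uses the snapshot, not cache.pfmemalloc */
		printf("pfmemalloc=%d\n", pfmemalloc);
		return 0;
	}
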
+diff --git a/net/core/sock.c b/net/core/sock.c
+index fd7b41edf1ce..e425d259a9f0 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2508,12 +2508,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
+ if (sk->sk_lock.owned)
+ __lock_sock(sk);
+ sk->sk_lock.owned = 1;
+- spin_unlock(&sk->sk_lock.slock);
++ spin_unlock_bh(&sk->sk_lock.slock);
+ /*
+ * The sk_lock has mutex_lock() semantics here:
+ */
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+- local_bh_enable();
+ }
+ EXPORT_SYMBOL(lock_sock_nested);
+
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 38abe70e595f..443259a04862 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -69,6 +69,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/kernel.h>
+ #include <linux/fcntl.h>
++#include <linux/sysrq.h>
+ #include <linux/socket.h>
+ #include <linux/in.h>
+ #include <linux/inet.h>
+@@ -77,6 +78,7 @@
+ #include <linux/string.h>
+ #include <linux/netfilter_ipv4.h>
+ #include <linux/slab.h>
++#include <linux/locallock.h>
+ #include <net/snmp.h>
+ #include <net/ip.h>
+ #include <net/route.h>
+@@ -204,6 +206,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
+ *
+ * On SMP we have one ICMP socket per-cpu.
+ */
++static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock);
++
+ static struct sock *icmp_sk(struct net *net)
+ {
+ return *this_cpu_ptr(net->ipv4.icmp_sk);
+@@ -215,12 +219,14 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
+
+ local_bh_disable();
+
++ local_lock(icmp_sk_lock);
+ sk = icmp_sk(net);
+
+ if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
+ /* This can happen if the output path signals a
+ * dst_link_failure() for an outgoing ICMP packet.
+ */
++ local_unlock(icmp_sk_lock);
+ local_bh_enable();
+ return NULL;
+ }
+@@ -230,6 +236,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
+ static inline void icmp_xmit_unlock(struct sock *sk)
+ {
+ spin_unlock_bh(&sk->sk_lock.slock);
++ local_unlock(icmp_sk_lock);
+ }
+
+ int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
+@@ -358,6 +365,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
+ struct sock *sk;
+ struct sk_buff *skb;
+
++ local_lock(icmp_sk_lock);
+ sk = icmp_sk(dev_net((*rt)->dst.dev));
+ if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
+ icmp_param->data_len+icmp_param->head_len,
+@@ -380,6 +388,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_push_pending_frames(sk, fl4);
+ }
++ local_unlock(icmp_sk_lock);
+ }
+
+ /*
+@@ -891,6 +900,30 @@ static bool icmp_redirect(struct sk_buff *skb)
+ }
+
+ /*
++ * 32bit and 64bit have different timestamp length, so we check for
++ * the cookie at offset 20 and verify it is repeated at offset 50
++ */
++#define CO_POS0 20
++#define CO_POS1 50
++#define CO_SIZE sizeof(int)
++#define ICMP_SYSRQ_SIZE 57
++
++/*
++ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
++ * pattern and if it matches send the next byte as a trigger to sysrq.
++ */
++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
++{
++ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
++ char *p = skb->data;
++
++ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
++ !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
++ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
++ handle_sysrq(p[CO_POS0 + CO_SIZE]);
++}
++
++/*
+ * Handle ICMP_ECHO ("ping") requests.
+ *
+ * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
+@@ -917,6 +950,11 @@ static bool icmp_echo(struct sk_buff *skb)
+ icmp_param.data_len = skb->len;
+ icmp_param.head_len = sizeof(struct icmphdr);
+ icmp_reply(&icmp_param, skb);
++
++ if (skb->len == ICMP_SYSRQ_SIZE &&
++ net->ipv4.sysctl_icmp_echo_sysrq) {
++ icmp_check_sysrq(net, skb);
++ }
+ }
+ /* should there be an ICMP stat for ignored echos? */
+ return true;
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 1cb67de106fe..332a485323f0 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -681,6 +681,13 @@ static struct ctl_table ipv4_net_table[] = {
+ .proc_handler = proc_dointvec
+ },
+ {
++ .procname = "icmp_echo_sysrq",
++ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
++ {
+ .procname = "icmp_ignore_bogus_error_responses",
+ .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
+ .maxlen = sizeof(int),
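[annotation] The new knob is disarmed while it reads 0; writing a non-zero cookie arms it. A hypothetical userspace helper, with the path taken from the .procname entry above:

	#include <stdio.h>

	/* Write the cookie to the sysctl added above; 0 on success. */
	static int arm_icmp_sysrq(unsigned int cookie)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/icmp_echo_sysrq", "w");

		if (!f)
			return -1;
		fprintf(f, "%u\n", cookie);
		return fclose(f);
	}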
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 7158d4f8dae4..0dc007fc6704 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -62,6 +62,7 @@
+ #include <linux/init.h>
+ #include <linux/times.h>
+ #include <linux/slab.h>
++#include <linux/locallock.h>
+
+ #include <net/net_namespace.h>
+ #include <net/icmp.h>
+@@ -565,6 +566,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(tcp_v4_send_check);
+
++static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock);
+ /*
+ * This routine will send an RST to the other tcp.
+ *
+@@ -692,6 +694,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ offsetof(struct inet_timewait_sock, tw_bound_dev_if));
+
+ arg.tos = ip_hdr(skb)->tos;
++
++ local_lock(tcp_sk_lock);
+ local_bh_disable();
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ skb, &TCP_SKB_CB(skb)->header.h4.opt,
+@@ -701,6 +705,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+ local_bh_enable();
++ local_unlock(tcp_sk_lock);
+
+ #ifdef CONFIG_TCP_MD5SIG
+ out:
+@@ -776,6 +781,7 @@ static void tcp_v4_send_ack(struct net *net,
+ if (oif)
+ arg.bound_dev_if = oif;
+ arg.tos = tos;
++ local_lock(tcp_sk_lock);
+ local_bh_disable();
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ skb, &TCP_SKB_CB(skb)->header.h4.opt,
+@@ -784,6 +790,7 @@ static void tcp_v4_send_ack(struct net *net,
+
+ __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ local_bh_enable();
++ local_unlock(tcp_sk_lock);
+ }
+
+ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 9dce3b157908..525efa5309ac 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4064,7 +4064,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+
+- WARN_ON_ONCE(softirq_count() == 0);
++ WARN_ON_ONCE_NONRT(softirq_count() == 0);
+
+ if (WARN_ON(status->band >= NUM_NL80211_BANDS))
+ goto drop;
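[annotation] WARN_ON_ONCE_NONRT() is supplied elsewhere in the RT patch; paraphrased (assuming the usual CONFIG_PREEMPT_RT_BASE spelling), it keeps the assertion on mainline but compiles it out on RT, where softirqs run in task context and softirq_count() no longer implies what this check assumes:

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define WARN_ON_ONCE_NONRT(condition)	do { } while (0)
	#else
	# define WARN_ON_ONCE_NONRT(condition)	WARN_ON_ONCE(condition)
	#endif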
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index f39276d1c2d7..10880c89d62f 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -22,11 +22,17 @@
+ #include <linux/proc_fs.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/locallock.h>
+ #include <net/net_namespace.h>
+ #include <net/sock.h>
+
+ #include "nf_internals.h"
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
++EXPORT_PER_CPU_SYMBOL(xt_write_lock);
++#endif
++
+ static DEFINE_MUTEX(afinfo_mutex);
+
+ const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 33a4697d5539..475cb74bf825 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -63,6 +63,7 @@
+ #include <linux/if_packet.h>
+ #include <linux/wireless.h>
+ #include <linux/kernel.h>
++#include <linux/delay.h>
+ #include <linux/kmod.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+@@ -695,7 +696,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
+ if (BLOCK_NUM_PKTS(pbd)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+
+@@ -957,7 +958,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
+ if (!(status & TP_STATUS_BLK_TMO)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+ prb_close_block(pkc, pbd, po, status);
+diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
+index 977f69886c00..f3e7a36b0396 100644
+--- a/net/rds/ib_rdma.c
++++ b/net/rds/ib_rdma.c
+@@ -34,6 +34,7 @@
+ #include <linux/slab.h>
+ #include <linux/rculist.h>
+ #include <linux/llist.h>
++#include <linux/delay.h>
+
+ #include "rds_single_path.h"
+ #include "ib_mr.h"
+@@ -210,7 +211,7 @@ static inline void wait_clean_list_grace(void)
+ for_each_online_cpu(cpu) {
+ flag = &per_cpu(clean_list_grace, cpu);
+ while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+
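[annotation] Both busy-wait loops above trade cpu_relax() for cpu_chill(), hence the new <linux/delay.h> includes. The motivation: on RT a high-priority spinner could starve the very task it is waiting on, so the waiter briefly sleeps instead. A simplified paraphrase of the helper, not its exact RT-patch definition:

	#include <linux/delay.h>

	#ifndef CONFIG_PREEMPT_RT_FULL
	# define cpu_chill()	cpu_relax()	/* !RT: spin as before */
	#else
	/* RT (simplified): sleep ~1 ms so the holder can make progress. */
	static inline void cpu_chill(void)
	{
		msleep(1);
	}
	#endif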
+diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
+index 814d285ff802..d4d088e9be85 100644
+--- a/net/rxrpc/security.c
++++ b/net/rxrpc/security.c
+@@ -19,9 +19,6 @@
+ #include <keys/rxrpc-type.h>
+ #include "ar-internal.h"
+
+-static LIST_HEAD(rxrpc_security_methods);
+-static DECLARE_RWSEM(rxrpc_security_sem);
+-
+ static const struct rxrpc_security *rxrpc_security_types[] = {
+ [RXRPC_SECURITY_NONE] = &rxrpc_no_security,
+ #ifdef CONFIG_RXKAD
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 12ebde845523..99f3ce50c6c4 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -975,7 +975,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
+ rcu_assign_pointer(sch->stab, stab);
+ }
+ if (tca[TCA_RATE]) {
+- seqcount_t *running;
++ net_seqlock_t *running;
+
+ err = -EOPNOTSUPP;
+ if (sch->flags & TCQ_F_MQROOT)
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 657c13362b19..cbab8d4d5864 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -426,7 +426,11 @@ struct Qdisc noop_qdisc = {
+ .list = LIST_HEAD_INIT(noop_qdisc.list),
+ .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
+ .dev_queue = &noop_netdev_queue,
++#ifdef CONFIG_PREEMPT_RT_BASE
++ .running = __SEQLOCK_UNLOCKED(noop_qdisc.running),
++#else
+ .running = SEQCNT_ZERO(noop_qdisc.running),
++#endif
+ .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
+ };
+ EXPORT_SYMBOL(noop_qdisc);
+@@ -620,9 +624,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ lockdep_set_class(&sch->busylock,
+ dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++ seqlock_init(&sch->running);
++ lockdep_set_class(&sch->running.seqcount,
++ dev->qdisc_running_key ?: &qdisc_running_key);
++ lockdep_set_class(&sch->running.lock,
++ dev->qdisc_running_key ?: &qdisc_running_key);
++#else
+ seqcount_init(&sch->running);
+ lockdep_set_class(&sch->running,
+ dev->qdisc_running_key ?: &qdisc_running_key);
++#endif
+
+ sch->ops = ops;
+ sch->enqueue = ops->enqueue;
+@@ -917,7 +929,7 @@ void dev_deactivate_many(struct list_head *head)
+ /* Wait for outstanding qdisc_run calls. */
+ list_for_each_entry(dev, head, close_list)
+ while (some_qdisc_is_busy(dev))
+- yield();
++ msleep(1);
+ }
+
+ void dev_deactivate(struct net_device *dev)
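[annotation] Both scheduler hunks depend on net_seqlock_t, which the RT patch introduces in include/net/net_seq_lock.h; paraphrased, it widens the qdisc running seqcount into a full seqlock on RT so readers can block instead of spinning against a preempted writer. The yield() to msleep(1) change in dev_deactivate_many() follows the same anti-starvation logic as cpu_chill() above.

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define net_seqlock_t			seqlock_t
	# define net_seq_begin(__r)		read_seqbegin(__r)
	# define net_seq_retry(__r, __s)	read_seqretry(__r, __s)
	#else
	# define net_seqlock_t			seqcount_t
	# define net_seq_begin(__r)		read_seqcount_begin(__r)
	# define net_seq_retry(__r, __s)	read_seqcount_retry(__r, __s)
	#endif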
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index c3f652395a80..2dd84493528e 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -396,7 +396,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+ goto out;
+ }
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
+
+ atomic_long_inc(&pool->sp_stats.packets);
+@@ -432,7 +432,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+
+ atomic_long_inc(&pool->sp_stats.threads_woken);
+ wake_up_process(rqstp->rq_task);
+- put_cpu();
++ put_cpu_light();
+ goto out;
+ }
+ rcu_read_unlock();
+@@ -453,7 +453,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+ goto redo_search;
+ }
+ rqstp = NULL;
+- put_cpu();
++ put_cpu_light();
+ out:
+ trace_svc_xprt_do_enqueue(xprt, rqstp);
+ }
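[annotation] get_cpu_light()/put_cpu_light() come from the RT patch's smp.h changes; paraphrased, on RT they only pin the task to its current CPU (it remains preemptible), and on mainline they fall back to the classic preempt-disabling pair:

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
	# define put_cpu_light()	migrate_enable()
	#else
	# define get_cpu_light()	get_cpu()
	# define put_cpu_light()	put_cpu()
	#endif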
+diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
+index 6fdc97ef6023..523e0420d7f0 100755
+--- a/scripts/mkcompile_h
++++ b/scripts/mkcompile_h
+@@ -4,7 +4,8 @@ TARGET=$1
+ ARCH=$2
+ SMP=$3
+ PREEMPT=$4
+-CC=$5
++RT=$5
++CC=$6
+
+ vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
+
+@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
+ CONFIG_FLAGS=""
+ if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+ if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
+ UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
+
+ # Truncate to maximum length
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index c61fd50f771f..1583de410f62 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
+ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
+ {
+ if (!substream->pcm->nonatomic)
+- local_irq_disable();
++ local_irq_disable_nort();
+ snd_pcm_stream_lock(substream);
+ }
+ EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
+@@ -150,7 +150,7 @@ void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
+ {
+ snd_pcm_stream_unlock(substream);
+ if (!substream->pcm->nonatomic)
+- local_irq_enable();
++ local_irq_enable_nort();
+ }
+ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
+
+@@ -158,7 +158,7 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
+ {
+ unsigned long flags = 0;
+ if (!substream->pcm->nonatomic)
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ snd_pcm_stream_lock(substream);
+ return flags;
+ }
+@@ -176,7 +176,7 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
+ {
+ snd_pcm_stream_unlock(substream);
+ if (!substream->pcm->nonatomic)
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
+
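[annotation] The _nort() helpers used above are defined by the RT patch in the irqflags header; paraphrased, they behave like the plain local_irq_*() operations on mainline and become no-ops on RT, where the PCM stream lock is a sleeping lock and hard interrupts must stay enabled:

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define local_irq_disable_nort()	do { } while (0)
	# define local_irq_enable_nort()	do { } while (0)
	# define local_irq_save_nort(flags)	local_save_flags(flags)
	# define local_irq_restore_nort(flags)	(void)(flags)
	#else
	# define local_irq_disable_nort()	local_irq_disable()
	# define local_irq_enable_nort()	local_irq_enable()
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	#endif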
diff --git a/target/linux/patches/4.8.5/startup.patch b/target/linux/patches/4.8.6/startup.patch
index e54ac19a6..e54ac19a6 100644
--- a/target/linux/patches/4.8.5/startup.patch
+++ b/target/linux/patches/4.8.6/startup.patch
diff --git a/target/linux/patches/4.8.5/vdso2.patch b/target/linux/patches/4.8.6/vdso2.patch
index 35df488a8..35df488a8 100644
--- a/target/linux/patches/4.8.5/vdso2.patch
+++ b/target/linux/patches/4.8.6/vdso2.patch