author    Waldemar Brodkorb <wbx@openadk.org>    2015-09-09 08:25:47 +0200
committer Waldemar Brodkorb <wbx@openadk.org>    2015-09-09 08:25:52 +0200
commit    ed130c4bcc41e54fdad84bff1b90a6804c4564f4 (patch)
tree      b34ffea65f3d3c2ea512040c25e81e8d5efedfd5 /target/linux/patches
parent    46f07863b1ef718701e6e654c09f5e5b591e4138 (diff)
add realtime patch to 4.1.x kernel
Diffstat (limited to 'target/linux/patches')
-rw-r--r--    target/linux/patches/4.1.6/realtime.patch    26664
1 file changed, 26664 insertions, 0 deletions
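The patch below folds the PREEMPT_RT series for the 4.1 kernel into a single file. Its recurring changes are: replacing in_atomic() checks in arch fault handlers with faulthandler_disabled(), decoupling pagefault_disable()/pagefault_enable() from preemption, converting locks taken in atomic or early-boot paths (the various boot_lock instances, unwind_lock) from spinlock_t to raw_spinlock_t, and wiring up lazy preemption via TIF_NEED_RESCHED_LAZY. As a minimal sketch of the raw-spinlock pattern (illustrative only, not part of the patch; release_secondary is a made-up name), the conversion matters because spinlock_t becomes a sleeping lock on PREEMPT_RT:

#include <linux/spinlock.h>

/*
 * Sketch only. On PREEMPT_RT, spinlock_t is backed by a sleeping
 * rt_mutex, so code that must not sleep -- e.g. SMP bringup running
 * with interrupts disabled -- has to use raw_spinlock_t, which keeps
 * the classic busy-waiting behaviour on all kernel configurations.
 */
static DEFINE_RAW_SPINLOCK(boot_lock);

static void release_secondary(void)
{
	unsigned long flags;

	/* raw_ variants never sleep; safe with interrupts disabled */
	raw_spin_lock_irqsave(&boot_lock, flags);
	/* ... release the secondary CPU from its holding pen ... */
	raw_spin_unlock_irqrestore(&boot_lock, flags);
}

The same reasoning drives the unwind_lock change in arch/arm/kernel/unwind.c: that lock nests inside IRQ-off regions, so it must remain a real spinning lock under RT.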
diff --git a/target/linux/patches/4.1.6/realtime.patch b/target/linux/patches/4.1.6/realtime.patch
new file mode 100644
index 000000000..9b5b92ee1
--- /dev/null
+++ b/target/linux/patches/4.1.6/realtime.patch
@@ -0,0 +1,26664 @@
+diff -Nur linux-4.1.6.orig/arch/alpha/mm/fault.c linux-4.1.6/arch/alpha/mm/fault.c
+--- linux-4.1.6.orig/arch/alpha/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/alpha/mm/fault.c 2015-09-08 23:49:03.498378314 +0200
+@@ -23,8 +23,7 @@
+ #include <linux/smp.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+-
+-#include <asm/uaccess.h>
++#include <linux/uaccess.h>
+
+ extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
+
+@@ -107,7 +106,7 @@
+
+ /* If we're in an interrupt context, or have no user context,
+ we must not take the fault. */
+- if (!mm || in_atomic())
++ if (!mm || faulthandler_disabled())
+ goto no_context;
+
+ #ifdef CONFIG_ALPHA_LARGE_VMALLOC
+diff -Nur linux-4.1.6.orig/arch/arc/include/asm/futex.h linux-4.1.6/arch/arc/include/asm/futex.h
+--- linux-4.1.6.orig/arch/arc/include/asm/futex.h 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arc/include/asm/futex.h 2015-09-08 23:49:03.498378314 +0200
+@@ -53,7 +53,7 @@
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+- pagefault_disable(); /* implies preempt_disable() */
++ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+@@ -75,7 +75,7 @@
+ ret = -ENOSYS;
+ }
+
+- pagefault_enable(); /* subsumes preempt_enable() */
++ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+@@ -104,7 +104,7 @@
+ return ret;
+ }
+
+-/* Compare-xchg with preemption disabled.
++/* Compare-xchg with pagefaults disabled.
+ * Notes:
+ * -Best-Effort: Exchg happens only if compare succeeds.
+ * If compare fails, returns; leaving retry/looping to upper layers
+@@ -121,7 +121,7 @@
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+- pagefault_disable(); /* implies preempt_disable() */
++ pagefault_disable();
+
+ /* TBD : can use llock/scond */
+ __asm__ __volatile__(
+@@ -142,7 +142,7 @@
+ : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+ : "cc", "memory");
+
+- pagefault_enable(); /* subsumes preempt_enable() */
++ pagefault_enable();
+
+ *uval = val;
+ return val;
+diff -Nur linux-4.1.6.orig/arch/arc/mm/fault.c linux-4.1.6/arch/arc/mm/fault.c
+--- linux-4.1.6.orig/arch/arc/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arc/mm/fault.c 2015-09-08 23:49:03.498378314 +0200
+@@ -86,7 +86,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (faulthandler_disabled() || !mm)
+ goto no_context;
+
+ if (user_mode(regs))
+diff -Nur linux-4.1.6.orig/arch/arm/include/asm/cmpxchg.h linux-4.1.6/arch/arm/include/asm/cmpxchg.h
+--- linux-4.1.6.orig/arch/arm/include/asm/cmpxchg.h 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/include/asm/cmpxchg.h 2015-09-08 23:49:03.498378314 +0200
+@@ -129,6 +129,8 @@
+
+ #else /* min ARCH >= ARMv6 */
+
++#define __HAVE_ARCH_CMPXCHG 1
++
+ extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+ /*
+diff -Nur linux-4.1.6.orig/arch/arm/include/asm/futex.h linux-4.1.6/arch/arm/include/asm/futex.h
+--- linux-4.1.6.orig/arch/arm/include/asm/futex.h 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/include/asm/futex.h 2015-09-08 23:49:03.498378314 +0200
+@@ -93,6 +93,7 @@
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
++ preempt_disable();
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: " TUSER(ldr) " %1, [%4]\n"
+ " teq %1, %2\n"
+@@ -104,6 +105,8 @@
+ : "cc", "memory");
+
+ *uval = val;
++ preempt_enable();
++
+ return ret;
+ }
+
+@@ -124,7 +127,10 @@
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+- pagefault_disable(); /* implies preempt_disable() */
++#ifndef CONFIG_SMP
++ preempt_disable();
++#endif
++ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+@@ -146,7 +152,10 @@
+ ret = -ENOSYS;
+ }
+
+- pagefault_enable(); /* subsumes preempt_enable() */
++ pagefault_enable();
++#ifndef CONFIG_SMP
++ preempt_enable();
++#endif
+
+ if (!ret) {
+ switch (cmp) {
+diff -Nur linux-4.1.6.orig/arch/arm/include/asm/switch_to.h linux-4.1.6/arch/arm/include/asm/switch_to.h
+--- linux-4.1.6.orig/arch/arm/include/asm/switch_to.h 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/include/asm/switch_to.h 2015-09-08 23:49:03.498378314 +0200
+@@ -3,6 +3,13 @@
+
+ #include <linux/thread_info.h>
+
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+ /*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+@@ -22,6 +29,7 @@
+
+ #define switch_to(prev,next,last) \
+ do { \
++ switch_kmaps(prev, next); \
+ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
+ } while (0)
+
+diff -Nur linux-4.1.6.orig/arch/arm/include/asm/thread_info.h linux-4.1.6/arch/arm/include/asm/thread_info.h
+--- linux-4.1.6.orig/arch/arm/include/asm/thread_info.h 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/include/asm/thread_info.h 2015-09-08 23:49:03.498378314 +0200
+@@ -50,6 +50,7 @@
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ __u32 cpu; /* cpu */
+@@ -147,6 +148,7 @@
+ #define TIF_SIGPENDING 0
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
++#define TIF_NEED_RESCHED_LAZY 3
+ #define TIF_UPROBE 7
+ #define TIF_SYSCALL_TRACE 8
+ #define TIF_SYSCALL_AUDIT 9
+@@ -160,6 +162,7 @@
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+diff -Nur linux-4.1.6.orig/arch/arm/Kconfig linux-4.1.6/arch/arm/Kconfig
+--- linux-4.1.6.orig/arch/arm/Kconfig 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/Kconfig 2015-09-08 23:49:03.498378314 +0200
+@@ -31,7 +31,7 @@
+ select HARDIRQS_SW_RESEND
+ select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+ select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
++ select HAVE_ARCH_JUMP_LABEL if (!XIP_KERNEL && !PREEMPT_RT_BASE)
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+ select HAVE_ARCH_TRACEHOOK
+@@ -66,6 +66,7 @@
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+diff -Nur linux-4.1.6.orig/arch/arm/kernel/asm-offsets.c linux-4.1.6/arch/arm/kernel/asm-offsets.c
+--- linux-4.1.6.orig/arch/arm/kernel/asm-offsets.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/kernel/asm-offsets.c 2015-09-08 23:49:03.498378314 +0200
+@@ -65,6 +65,7 @@
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+diff -Nur linux-4.1.6.orig/arch/arm/kernel/entry-armv.S linux-4.1.6/arch/arm/kernel/entry-armv.S
+--- linux-4.1.6.orig/arch/arm/kernel/entry-armv.S 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/kernel/entry-armv.S 2015-09-08 23:49:03.498378314 +0200
+@@ -208,11 +208,18 @@
+ #ifdef CONFIG_PREEMPT
+ get_thread_info tsk
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+- ldr r0, [tsk, #TI_FLAGS] @ get flags
+ teq r8, #0 @ if preempt count != 0
++ bne 1f @ return from exception
++ ldr r0, [tsk, #TI_FLAGS] @ get flags
++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
++ blne svc_preempt @ preempt!
++
++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r8, #0 @ if preempt lazy count != 0
+ movne r0, #0 @ force flags to 0
+- tst r0, #_TIF_NEED_RESCHED
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ blne svc_preempt
++1:
+ #endif
+
+ svc_exit r5, irq = 1 @ return from exception
+@@ -227,6 +234,8 @@
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
++ bne 1b
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ reteq r8 @ go again
+ b 1b
+ #endif
+diff -Nur linux-4.1.6.orig/arch/arm/kernel/process.c linux-4.1.6/arch/arm/kernel/process.c
+--- linux-4.1.6.orig/arch/arm/kernel/process.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/kernel/process.c 2015-09-08 23:49:03.498378314 +0200
+@@ -290,6 +290,30 @@
+ }
+
+ #ifdef CONFIG_MMU
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++ struct page *page;
++ unsigned long addr = 0xffff0000;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ page = pmd_page(*(pmd));
++
++ pgtable_page_ctor(page);
++
++ return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
++
+ #ifdef CONFIG_KUSER_HELPERS
+ /*
+ * The vectors page is always readable from user space for the
+diff -Nur linux-4.1.6.orig/arch/arm/kernel/signal.c linux-4.1.6/arch/arm/kernel/signal.c
+--- linux-4.1.6.orig/arch/arm/kernel/signal.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/kernel/signal.c 2015-09-08 23:49:03.498378314 +0200
+@@ -563,7 +563,8 @@
+ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ {
+ do {
+- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ if (likely(thread_flags & (_TIF_NEED_RESCHED |
++ _TIF_NEED_RESCHED_LAZY))) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+diff -Nur linux-4.1.6.orig/arch/arm/kernel/unwind.c linux-4.1.6/arch/arm/kernel/unwind.c
+--- linux-4.1.6.orig/arch/arm/kernel/unwind.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/kernel/unwind.c 2015-09-08 23:49:03.498378314 +0200
+@@ -93,7 +93,7 @@
+ static const struct unwind_idx *__origin_unwind_idx;
+ extern const struct unwind_idx __stop_unwind_idx[];
+
+-static DEFINE_SPINLOCK(unwind_lock);
++static DEFINE_RAW_SPINLOCK(unwind_lock);
+ static LIST_HEAD(unwind_tables);
+
+ /* Convert a prel31 symbol to an absolute address */
+@@ -201,7 +201,7 @@
+ /* module unwind tables */
+ struct unwind_table *table;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_for_each_entry(table, &unwind_tables, list) {
+ if (addr >= table->begin_addr &&
+ addr < table->end_addr) {
+@@ -213,7 +213,7 @@
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ }
+
+ pr_debug("%s: idx = %p\n", __func__, idx);
+@@ -529,9 +529,9 @@
+ tab->begin_addr = text_addr;
+ tab->end_addr = text_addr + text_size;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_add_tail(&tab->list, &unwind_tables);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ return tab;
+ }
+@@ -543,9 +543,9 @@
+ if (!tab)
+ return;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_del(&tab->list);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ kfree(tab);
+ }
+diff -Nur linux-4.1.6.orig/arch/arm/kvm/arm.c linux-4.1.6/arch/arm/kvm/arm.c
+--- linux-4.1.6.orig/arch/arm/kvm/arm.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/kvm/arm.c 2015-09-08 23:49:03.498378314 +0200
+@@ -474,9 +474,9 @@
+
+ static void vcpu_pause(struct kvm_vcpu *vcpu)
+ {
+- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
++ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+- wait_event_interruptible(*wq, !vcpu->arch.pause);
++ swait_event_interruptible(*wq, !vcpu->arch.pause);
+ }
+
+ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+diff -Nur linux-4.1.6.orig/arch/arm/kvm/psci.c linux-4.1.6/arch/arm/kvm/psci.c
+--- linux-4.1.6.orig/arch/arm/kvm/psci.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/kvm/psci.c 2015-09-08 23:49:03.498378314 +0200
+@@ -68,7 +68,7 @@
+ {
+ struct kvm *kvm = source_vcpu->kvm;
+ struct kvm_vcpu *vcpu = NULL;
+- wait_queue_head_t *wq;
++ struct swait_head *wq;
+ unsigned long cpu_id;
+ unsigned long context_id;
+ phys_addr_t target_pc;
+@@ -117,7 +117,7 @@
+ smp_mb(); /* Make sure the above is visible */
+
+ wq = kvm_arch_vcpu_wq(vcpu);
+- wake_up_interruptible(wq);
++ swait_wake_interruptible(wq);
+
+ return PSCI_RET_SUCCESS;
+ }
+diff -Nur linux-4.1.6.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.6/arch/arm/mach-exynos/platsmp.c
+--- linux-4.1.6.orig/arch/arm/mach-exynos/platsmp.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/mach-exynos/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+@@ -231,7 +231,7 @@
+ return (void __iomem *)(S5P_VA_SCU);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void exynos_secondary_init(unsigned int cpu)
+ {
+@@ -244,8 +244,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -259,7 +259,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -286,7 +286,7 @@
+
+ if (timeout == 0) {
+ printk(KERN_ERR "cpu1 power enable failed");
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return -ETIMEDOUT;
+ }
+ }
+@@ -342,7 +342,7 @@
+ * calibrations, then wait for it to finish
+ */
+ fail:
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? ret : 0;
+ }
+diff -Nur linux-4.1.6.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.6/arch/arm/mach-hisi/platmcpm.c
+--- linux-4.1.6.orig/arch/arm/mach-hisi/platmcpm.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/mach-hisi/platmcpm.c 2015-09-08 23:49:03.498378314 +0200
+@@ -57,7 +57,7 @@
+
+ static void __iomem *sysctrl, *fabric;
+ static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ static u32 fabric_phys_addr;
+ /*
+ * [0]: bootwrapper physical address
+@@ -104,7 +104,7 @@
+ if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
+ return -EINVAL;
+
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+
+ if (hip04_cpu_table[cluster][cpu])
+ goto out;
+@@ -133,7 +133,7 @@
+ udelay(20);
+ out:
+ hip04_cpu_table[cluster][cpu]++;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+
+ return 0;
+ }
+@@ -149,7 +149,7 @@
+
+ __mcpm_cpu_going_down(cpu, cluster);
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+ BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+ hip04_cpu_table[cluster][cpu]--;
+ if (hip04_cpu_table[cluster][cpu] == 1) {
+@@ -162,7 +162,7 @@
+
+ last_man = hip04_cluster_is_down(cluster);
+ if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ /* Since it's Cortex A15, disable L2 prefetching. */
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3 \n\t"
+@@ -173,7 +173,7 @@
+ hip04_set_snoop_filter(cluster, 0);
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ } else {
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ v7_exit_coherency_flush(louis);
+ }
+
+@@ -192,7 +192,7 @@
+ cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
+
+ count = TIMEOUT_MSEC / POLL_MSEC;
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ for (tries = 0; tries < count; tries++) {
+ if (hip04_cpu_table[cluster][cpu]) {
+ ret = -EBUSY;
+@@ -202,10 +202,10 @@
+ data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
+ if (data & CORE_WFI_STATUS(cpu))
+ break;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ /* Wait for clean L2 when the whole cluster is down. */
+ msleep(POLL_MSEC);
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ }
+ if (tries >= count)
+ goto err;
+@@ -220,10 +220,10 @@
+ }
+ if (tries >= count)
+ goto err;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return 0;
+ err:
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return ret;
+ }
+
+@@ -235,10 +235,10 @@
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+ if (!hip04_cpu_table[cluster][cpu])
+ hip04_cpu_table[cluster][cpu] = 1;
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
+diff -Nur linux-4.1.6.orig/arch/arm/mach-omap2/omap-smp.c linux-4.1.6/arch/arm/mach-omap2/omap-smp.c
+--- linux-4.1.6.orig/arch/arm/mach-omap2/omap-smp.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/mach-omap2/omap-smp.c 2015-09-08 23:49:03.498378314 +0200
+@@ -43,7 +43,7 @@
+ /* SCU base address */
+ static void __iomem *scu_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __iomem *omap4_get_scu_base(void)
+ {
+@@ -74,8 +74,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -89,7 +89,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -166,7 +166,7 @@
+ * Now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return 0;
+ }
+diff -Nur linux-4.1.6.orig/arch/arm/mach-prima2/platsmp.c linux-4.1.6/arch/arm/mach-prima2/platsmp.c
+--- linux-4.1.6.orig/arch/arm/mach-prima2/platsmp.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/mach-prima2/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+@@ -22,7 +22,7 @@
+
+ static void __iomem *clk_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void sirfsoc_secondary_init(unsigned int cpu)
+ {
+@@ -36,8 +36,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static const struct of_device_id clk_ids[] = {
+@@ -75,7 +75,7 @@
+ /* make sure write buffer is drained */
+ mb();
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -107,7 +107,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.1.6.orig/arch/arm/mach-qcom/platsmp.c linux-4.1.6/arch/arm/mach-qcom/platsmp.c
+--- linux-4.1.6.orig/arch/arm/mach-qcom/platsmp.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/mach-qcom/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+@@ -46,7 +46,7 @@
+
+ extern void secondary_startup_arm(void);
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ static void __ref qcom_cpu_die(unsigned int cpu)
+@@ -60,8 +60,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int scss_release_secondary(unsigned int cpu)
+@@ -284,7 +284,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+@@ -297,7 +297,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return ret;
+ }
+diff -Nur linux-4.1.6.orig/arch/arm/mach-spear/platsmp.c linux-4.1.6/arch/arm/mach-spear/platsmp.c
+--- linux-4.1.6.orig/arch/arm/mach-spear/platsmp.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/mach-spear/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+@@ -32,7 +32,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+
+@@ -47,8 +47,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -59,7 +59,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -84,7 +84,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.1.6.orig/arch/arm/mach-sti/platsmp.c linux-4.1.6/arch/arm/mach-sti/platsmp.c
+--- linux-4.1.6.orig/arch/arm/mach-sti/platsmp.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/mach-sti/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+@@ -34,7 +34,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void sti_secondary_init(unsigned int cpu)
+ {
+@@ -49,8 +49,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -61,7 +61,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -92,7 +92,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.1.6.orig/arch/arm/mach-ux500/platsmp.c linux-4.1.6/arch/arm/mach-ux500/platsmp.c
+--- linux-4.1.6.orig/arch/arm/mach-ux500/platsmp.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/mach-ux500/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+@@ -51,7 +51,7 @@
+ return NULL;
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void ux500_secondary_init(unsigned int cpu)
+ {
+@@ -64,8 +64,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -76,7 +76,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -97,7 +97,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.1.6.orig/arch/arm/mm/fault.c linux-4.1.6/arch/arm/mm/fault.c
+--- linux-4.1.6.orig/arch/arm/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/mm/fault.c 2015-09-08 23:49:03.498378314 +0200
+@@ -276,7 +276,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (faulthandler_disabled() || !mm)
+ goto no_context;
+
+ if (user_mode(regs))
+@@ -430,6 +430,9 @@
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, fsr, regs);
+
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ if (user_mode(regs))
+ goto bad_area;
+
+@@ -497,6 +500,9 @@
+ static int
+ do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ do_bad_area(addr, fsr, regs);
+ return 0;
+ }
+diff -Nur linux-4.1.6.orig/arch/arm/mm/highmem.c linux-4.1.6/arch/arm/mm/highmem.c
+--- linux-4.1.6.orig/arch/arm/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/mm/highmem.c 2015-09-08 23:49:03.498378314 +0200
+@@ -54,11 +54,13 @@
+
+ void *kmap_atomic(struct page *page)
+ {
++ pte_t pte = mk_pte(page, kmap_prot);
+ unsigned int idx;
+ unsigned long vaddr;
+ void *kmap;
+ int type;
+
++ preempt_disable_nort();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+@@ -92,7 +94,10 @@
+ * in place, so the contained TLB flush ensures the TLB is updated
+ * with the new mapping.
+ */
+- set_fixmap_pte(idx, mk_pte(page, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_fixmap_pte(idx, pte);
+
+ return (void *)vaddr;
+ }
+@@ -109,27 +114,33 @@
+
+ if (cache_is_vivt())
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(vaddr != __fix_to_virt(idx));
+- set_fixmap_pte(idx, __pte(0));
+ #else
+ (void) idx; /* to kill a warning */
+ #endif
++ set_fixmap_pte(idx, __pte(0));
+ kmap_atomic_idx_pop();
+ } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+ /* this address was obtained through kmap_high_get() */
+ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+ }
+ pagefault_enable();
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(__kunmap_atomic);
+
+ void *kmap_atomic_pfn(unsigned long pfn)
+ {
++ pte_t pte = pfn_pte(pfn, kmap_prot);
+ unsigned long vaddr;
+ int idx, type;
+ struct page *page = pfn_to_page(pfn);
+
++ preempt_disable_nort();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+@@ -140,7 +151,10 @@
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
+ #endif
+- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_fixmap_pte(idx, pte);
+
+ return (void *)vaddr;
+ }
+@@ -154,3 +168,28 @@
+
+ return pte_page(get_fixmap_pte(vaddr));
+ }
++
++#if defined CONFIG_PREEMPT_RT_FULL
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ int i;
++
++ /*
++ * Clear @prev's kmap_atomic mappings
++ */
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ set_fixmap_pte(idx, __pte(0));
++ }
++ /*
++ * Restore @next_p's kmap_atomic mappings
++ */
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ if (!pte_none(next_p->kmap_pte[i]))
++ set_fixmap_pte(idx, next_p->kmap_pte[i]);
++ }
++}
++#endif
+diff -Nur linux-4.1.6.orig/arch/arm/plat-versatile/platsmp.c linux-4.1.6/arch/arm/plat-versatile/platsmp.c
+--- linux-4.1.6.orig/arch/arm/plat-versatile/platsmp.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm/plat-versatile/platsmp.c 2015-09-08 23:49:03.502377871 +0200
+@@ -30,7 +30,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void versatile_secondary_init(unsigned int cpu)
+ {
+@@ -43,8 +43,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -55,7 +55,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+@@ -85,7 +85,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.1.6.orig/arch/arm64/include/asm/futex.h linux-4.1.6/arch/arm64/include/asm/futex.h
+--- linux-4.1.6.orig/arch/arm64/include/asm/futex.h 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm64/include/asm/futex.h 2015-09-08 23:49:03.502377871 +0200
+@@ -58,7 +58,7 @@
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+- pagefault_disable(); /* implies preempt_disable() */
++ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+@@ -85,7 +85,7 @@
+ ret = -ENOSYS;
+ }
+
+- pagefault_enable(); /* subsumes preempt_enable() */
++ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+diff -Nur linux-4.1.6.orig/arch/arm64/include/asm/thread_info.h linux-4.1.6/arch/arm64/include/asm/thread_info.h
+--- linux-4.1.6.orig/arch/arm64/include/asm/thread_info.h 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm64/include/asm/thread_info.h 2015-09-08 23:49:03.502377871 +0200
+@@ -47,6 +47,7 @@
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ int cpu; /* cpu */
+ };
+
+@@ -101,6 +102,7 @@
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+ #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
++#define TIF_NEED_RESCHED_LAZY 4
+ #define TIF_NOHZ 7
+ #define TIF_SYSCALL_TRACE 8
+ #define TIF_SYSCALL_AUDIT 9
+@@ -117,6 +119,7 @@
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+ #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_NOHZ (1 << TIF_NOHZ)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+diff -Nur linux-4.1.6.orig/arch/arm64/Kconfig linux-4.1.6/arch/arm64/Kconfig
+--- linux-4.1.6.orig/arch/arm64/Kconfig 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm64/Kconfig 2015-09-08 23:49:03.502377871 +0200
+@@ -69,8 +69,10 @@
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE
++ select HAVE_PREEMPT_LAZY
+ select HAVE_SYSCALL_TRACEPOINTS
+ select IRQ_DOMAIN
++ select IRQ_FORCED_THREADING
+ select MODULES_USE_ELF_RELA
+ select NO_BOOTMEM
+ select OF
+diff -Nur linux-4.1.6.orig/arch/arm64/kernel/asm-offsets.c linux-4.1.6/arch/arm64/kernel/asm-offsets.c
+--- linux-4.1.6.orig/arch/arm64/kernel/asm-offsets.c 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm64/kernel/asm-offsets.c 2015-09-08 23:49:03.502377871 +0200
+@@ -35,6 +35,7 @@
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+diff -Nur linux-4.1.6.orig/arch/arm64/kernel/entry.S linux-4.1.6/arch/arm64/kernel/entry.S
+--- linux-4.1.6.orig/arch/arm64/kernel/entry.S 2015-08-17 05:52:51.000000000 +0200
++++ linux-4.1.6/arch/arm64/kernel/entry.S 2015-09-08 23:49:03.502377871 +0200
+@@ -367,11 +367,16 @@
+ #ifdef CONFIG_PREEMPT
+ get_thread_info tsk
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+- cbnz w24, 1f // preempt count != 0
++ cbnz w24, 2f // preempt count != 0
+ ldr x0, [tsk, #TI_FLAGS] // get flags
+- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
+- bl el1_preempt
++ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
++
++ ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count
++ cbnz w24, 2f // preempt lazy count != 0
++ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
+ 1:
++ bl el1_preempt
++2:
+ #endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_on
+@@ -385,6 +390,7 @@
+ 1: bl preempt_schedule_irq // irq