Diffstat (limited to 'target/linux/patches/4.9.28/patch-realtime')
-rw-r--r-- | target/linux/patches/4.9.28/patch-realtime | 26970
1 file changed, 26970 insertions, 0 deletions
diff --git a/target/linux/patches/4.9.28/patch-realtime b/target/linux/patches/4.9.28/patch-realtime
new file mode 100644
index 000000000..ab9028ddd
--- /dev/null
+++ b/target/linux/patches/4.9.28/patch-realtime
@@ -0,0 +1,26970 @@
+diff -Nur linux-4.9.28.orig/arch/arm/include/asm/irq.h linux-4.9.28/arch/arm/include/asm/irq.h
+--- linux-4.9.28.orig/arch/arm/include/asm/irq.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/include/asm/irq.h 2017-05-19 03:37:25.122174217 +0200
+@@ -22,6 +22,8 @@
+ #endif
+
+ #ifndef __ASSEMBLY__
++#include <linux/cpumask.h>
++
+ struct irqaction;
+ struct pt_regs;
+ extern void migrate_irqs(void);
+diff -Nur linux-4.9.28.orig/arch/arm/include/asm/switch_to.h linux-4.9.28/arch/arm/include/asm/switch_to.h
+--- linux-4.9.28.orig/arch/arm/include/asm/switch_to.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/include/asm/switch_to.h 2017-05-19 03:37:25.122174217 +0200
+@@ -3,6 +3,13 @@
+
+ #include <linux/thread_info.h>
+
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+ /*
+  * For v7 SMP cores running a preemptible kernel we may be pre-empted
+  * during a TLB maintenance operation, so execute an inner-shareable dsb
+@@ -25,6 +32,7 @@
+ #define switch_to(prev,next,last) \
+ do { \
+ __complete_pending_tlbi(); \
++ switch_kmaps(prev, next); \
+ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
+ } while (0)
+
+diff -Nur linux-4.9.28.orig/arch/arm/include/asm/thread_info.h linux-4.9.28/arch/arm/include/asm/thread_info.h
+--- linux-4.9.28.orig/arch/arm/include/asm/thread_info.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/include/asm/thread_info.h 2017-05-19 03:37:25.122174217 +0200
+@@ -49,6 +49,7 @@
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ __u32 cpu; /* cpu */
+@@ -142,7 +143,8 @@
+ #define TIF_SYSCALL_TRACE 4 /* syscall trace active */
+ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
++#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
++#define TIF_NEED_RESCHED_LAZY 7
+
+ #define TIF_NOHZ 12 /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT 17
+@@ -152,6 +154,7 @@
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+@@ -167,7 +170,8 @@
+  * Change these and you break ASM code in entry-common.S
+  */
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
++ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
++ _TIF_NEED_RESCHED_LAZY)
+
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_ARM_THREAD_INFO_H */
+diff -Nur linux-4.9.28.orig/arch/arm/Kconfig linux-4.9.28/arch/arm/Kconfig
+--- linux-4.9.28.orig/arch/arm/Kconfig 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/Kconfig 2017-05-19 03:37:25.122174217 +0200
+@@ -36,7 +36,7 @@
+ select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+ select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+ select HAVE_ARCH_HARDENED_USERCOPY
+- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
+ select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+@@ -75,6 +75,7 @@
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+diff -Nur linux-4.9.28.orig/arch/arm/kernel/asm-offsets.c linux-4.9.28/arch/arm/kernel/asm-offsets.c
+--- linux-4.9.28.orig/arch/arm/kernel/asm-offsets.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/kernel/asm-offsets.c 2017-05-19 03:37:25.122174217 +0200
+@@ -65,6 +65,7 @@
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+diff -Nur linux-4.9.28.orig/arch/arm/kernel/entry-armv.S linux-4.9.28/arch/arm/kernel/entry-armv.S
+--- linux-4.9.28.orig/arch/arm/kernel/entry-armv.S 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/kernel/entry-armv.S 2017-05-19 03:37:25.122174217 +0200
+@@ -220,11 +220,18 @@
+
+ #ifdef CONFIG_PREEMPT
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+- ldr r0, [tsk, #TI_FLAGS] @ get flags
+ teq r8, #0 @ if preempt count != 0
++ bne 1f @ return from exception
++ ldr r0, [tsk, #TI_FLAGS] @ get flags
++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
++ blne svc_preempt @ preempt!
++
++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r8, #0 @ if preempt lazy count != 0
+ movne r0, #0 @ force flags to 0
+- tst r0, #_TIF_NEED_RESCHED
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ blne svc_preempt
++1:
+ #endif
+
+ svc_exit r5, irq = 1 @ return from exception
+@@ -239,8 +246,14 @@
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
++ bne 1b
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ reteq r8 @ go again
+- b 1b
++ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r0, #0 @ if preempt lazy count != 0
++ beq 1b
++ ret r8 @ go again
++
+ #endif
+
+ __und_fault:
+diff -Nur linux-4.9.28.orig/arch/arm/kernel/entry-common.S linux-4.9.28/arch/arm/kernel/entry-common.S
+--- linux-4.9.28.orig/arch/arm/kernel/entry-common.S 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/kernel/entry-common.S 2017-05-19 03:37:25.122174217 +0200
+@@ -36,7 +36,9 @@
+ UNWIND(.cantunwind )
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++ bne fast_work_pending
++ tst r1, #_TIF_SECCOMP
+ bne fast_work_pending
+
+ /* perform architecture specific actions before user return */
+@@ -62,8 +64,11 @@
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++ bne do_slower_path
++ tst r1, #_TIF_SECCOMP
+ beq no_work_pending
++do_slower_path:
+ UNWIND(.fnend )
+ ENDPROC(ret_fast_syscall)
+
+diff -Nur linux-4.9.28.orig/arch/arm/kernel/patch.c linux-4.9.28/arch/arm/kernel/patch.c
+--- linux-4.9.28.orig/arch/arm/kernel/patch.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/kernel/patch.c 2017-05-19 03:37:25.122174217 +0200
+@@ -15,7 +15,7 @@
+ unsigned int insn;
+ };
+
+-static DEFINE_SPINLOCK(patch_lock);
++static DEFINE_RAW_SPINLOCK(patch_lock);
+
+ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ __acquires(&patch_lock)
+@@ -32,7 +32,7 @@
+ return addr;
+
+ if (flags)
+- spin_lock_irqsave(&patch_lock, *flags);
++ raw_spin_lock_irqsave(&patch_lock, *flags);
+ else
+ __acquire(&patch_lock);
+
+@@ -47,7 +47,7 @@
+ clear_fixmap(fixmap);
+
+ if (flags)
+- spin_unlock_irqrestore(&patch_lock, *flags);
++ raw_spin_unlock_irqrestore(&patch_lock, *flags);
+ else
+ __release(&patch_lock);
+ }
+diff -Nur linux-4.9.28.orig/arch/arm/kernel/process.c linux-4.9.28/arch/arm/kernel/process.c
+--- linux-4.9.28.orig/arch/arm/kernel/process.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/kernel/process.c 2017-05-19 03:37:25.122174217 +0200
+@@ -322,6 +322,30 @@
+ }
+
+ #ifdef CONFIG_MMU
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++ struct page *page;
++ unsigned long addr = 0xffff0000;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ page = pmd_page(*(pmd));
++
++ pgtable_page_ctor(page);
++
++ return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
++
+ #ifdef CONFIG_KUSER_HELPERS
+ /*
+  * The vectors page is always readable from user space for the
+diff -Nur linux-4.9.28.orig/arch/arm/kernel/signal.c linux-4.9.28/arch/arm/kernel/signal.c
+--- linux-4.9.28.orig/arch/arm/kernel/signal.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/kernel/signal.c 2017-05-19 03:37:25.122174217 +0200
+@@ -572,7 +572,8 @@
+  */
+ trace_hardirqs_off();
+ do {
+- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ if (likely(thread_flags & (_TIF_NEED_RESCHED |
++ _TIF_NEED_RESCHED_LAZY))) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+diff -Nur linux-4.9.28.orig/arch/arm/kernel/smp.c linux-4.9.28/arch/arm/kernel/smp.c
+--- linux-4.9.28.orig/arch/arm/kernel/smp.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/kernel/smp.c 2017-05-19 03:37:25.122174217 +0200
+@@ -234,8 +234,6 @@
+ flush_cache_louis();
+ local_flush_tlb_all();
+
+- clear_tasks_mm_cpumask(cpu);
+-
+ return 0;
+ }
+
+@@ -251,6 +249,9 @@
+ pr_err("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
++
++ clear_tasks_mm_cpumask(cpu);
++
+ pr_notice("CPU%u: shutdown\n", cpu);
+
+ /*
+diff -Nur linux-4.9.28.orig/arch/arm/kernel/unwind.c linux-4.9.28/arch/arm/kernel/unwind.c
+--- linux-4.9.28.orig/arch/arm/kernel/unwind.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/kernel/unwind.c 2017-05-19 03:37:25.122174217 +0200
+@@ -93,7 +93,7 @@
+ static const struct unwind_idx *__origin_unwind_idx;
+ extern const struct unwind_idx __stop_unwind_idx[];
+
+-static DEFINE_SPINLOCK(unwind_lock);
++static DEFINE_RAW_SPINLOCK(unwind_lock);
+ static LIST_HEAD(unwind_tables);
+
+ /* Convert a prel31 symbol to an absolute address */
+@@ -201,7 +201,7 @@
+ /* module unwind tables */
+ struct unwind_table *table;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_for_each_entry(table, &unwind_tables, list) {
+ if (addr >= table->begin_addr &&
+ addr < table->end_addr) {
+@@ -213,7 +213,7 @@
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ }
+
+ pr_debug("%s: idx = %p\n", __func__, idx);
+@@ -529,9 +529,9 @@
+ tab->begin_addr = text_addr;
+ tab->end_addr = text_addr + text_size;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_add_tail(&tab->list, &unwind_tables);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ return tab;
+ }
+@@ -543,9 +543,9 @@
+ if (!tab)
+ return;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_del(&tab->list);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ kfree(tab);
+ }
+diff -Nur linux-4.9.28.orig/arch/arm/kvm/arm.c linux-4.9.28/arch/arm/kvm/arm.c
+--- linux-4.9.28.orig/arch/arm/kvm/arm.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/kvm/arm.c 2017-05-19 03:37:25.122174217 +0200
+@@ -619,7 +619,7 @@
+  * involves poking the GIC, which must be done in a
+  * non-preemptible context.
+  */
+- preempt_disable();
++ migrate_disable();
+ kvm_pmu_flush_hwstate(vcpu);
+ kvm_timer_flush_hwstate(vcpu);
+ kvm_vgic_flush_hwstate(vcpu);
+@@ -640,7 +640,7 @@
+ kvm_pmu_sync_hwstate(vcpu);
+ kvm_timer_sync_hwstate(vcpu);
+ kvm_vgic_sync_hwstate(vcpu);
+- preempt_enable();
++ migrate_enable();
+ continue;
+ }
+
+@@ -696,7 +696,7 @@
+
+ kvm_vgic_sync_hwstate(vcpu);
+
+- preempt_enable();
++ migrate_enable();
+
+ ret = handle_exit(vcpu, run, ret);
+ }
+diff -Nur linux-4.9.28.orig/arch/arm/mach-exynos/platsmp.c linux-4.9.28/arch/arm/mach-exynos/platsmp.c
+--- linux-4.9.28.orig/arch/arm/mach-exynos/platsmp.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/mach-exynos/platsmp.c 2017-05-19 03:37:25.122174217 +0200
+@@ -229,7 +229,7 @@
+ return (void __iomem *)(S5P_VA_SCU);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void exynos_secondary_init(unsigned int cpu)
+ {
+@@ -242,8 +242,8 @@
+ /*
+  * Synchronise with the boot thread.
+  */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
+@@ -307,7 +307,7 @@
+  * Set synchronisation state between this boot processor
+  * and the secondary one
+  */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+  * The secondary processor is waiting to be released from
+@@ -334,7 +334,7 @@
+
+ if (timeout == 0) {
+ printk(KERN_ERR "cpu1 power enable failed");
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return -ETIMEDOUT;
+ }
+ }
+@@ -380,7 +380,7 @@
+  * calibrations, then wait for it to finish
+  */
+ fail:
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? ret : 0;
+ }
+diff -Nur linux-4.9.28.orig/arch/arm/mach-hisi/platmcpm.c linux-4.9.28/arch/arm/mach-hisi/platmcpm.c
+--- linux-4.9.28.orig/arch/arm/mach-hisi/platmcpm.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/mach-hisi/platmcpm.c 2017-05-19 03:37:25.122174217 +0200
+@@ -61,7 +61,7 @@
+
+ static void __iomem *sysctrl, *fabric;
+ static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ static u32 fabric_phys_addr;
+ /*
+  * [0]: bootwrapper physical address
+@@ -113,7 +113,7 @@
+ if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
+ return -EINVAL;
+
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+
+ if (hip04_cpu_table[cluster][cpu])
+ goto out;
+@@ -147,7 +147,7 @@
+
+ out:
+ hip04_cpu_table[cluster][cpu]++;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+
+ return 0;
+ }
+@@ -162,11 +162,11 @@
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+ hip04_cpu_table[cluster][cpu]--;
+ if (hip04_cpu_table[cluster][cpu] == 1) {
+ /* A power_up request went ahead of us. */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return;
+ } else if (hip04_cpu_table[cluster][cpu] > 1) {
+ pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
+@@ -174,7 +174,7 @@
+ }
+
+ last_man = hip04_cluster_is_down(cluster);
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ if (last_man) {
+ /* Since it's Cortex A15, disable L2 prefetching. */
+ asm volatile(
+@@ -203,7 +203,7 @@
+ cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
+
+ count = TIMEOUT_MSEC / POLL_MSEC;
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ for (tries = 0; tries < count; tries++) {
+ if (hip04_cpu_table[cluster][cpu])
+ goto err;
+@@ -211,10 +211,10 @@
+ data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
+ if (data & CORE_WFI_STATUS(cpu))
+ break;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ /* Wait for clean L2 when the whole cluster is down. */
+ msleep(POLL_MSEC);
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ }
+ if (tries >= count)
+ goto err;
+@@ -231,10 +231,10 @@
+ goto err;
+ if (hip04_cluster_is_down(cluster))
+ hip04_set_snoop_filter(cluster, 0);
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return 1;
+ err:
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return 0;
+ }
+ #endif
+diff -Nur linux-4.9.28.orig/arch/arm/mach-omap2/omap-smp.c linux-4.9.28/arch/arm/mach-omap2/omap-smp.c
+--- linux-4.9.28.orig/arch/arm/mach-omap2/omap-smp.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/mach-omap2/omap-smp.c 2017-05-19 03:37:25.122174217 +0200
+@@ -64,7 +64,7 @@
+ .startup_addr = omap5_secondary_startup,
+ };
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __iomem *omap4_get_scu_base(void)
+ {
+@@ -131,8 +131,8 @@
+ /*
+  * Synchronise with the boot thread.
+  */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -146,7 +146,7 @@
+  * Set synchronisation state between this boot processor
+  * and the secondary one
+  */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+  * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -223,7 +223,7 @@
+  * Now the secondary core is starting up let it run its
+  * calibrations, then wait for it to finish
+  */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return 0;
+ }
+diff -Nur linux-4.9.28.orig/arch/arm/mach-prima2/platsmp.c linux-4.9.28/arch/arm/mach-prima2/platsmp.c
+--- linux-4.9.28.orig/arch/arm/mach-prima2/platsmp.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/mach-prima2/platsmp.c 2017-05-19 03:37:25.122174217 +0200
+@@ -22,7 +22,7 @@
+
+ static void __iomem *clk_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void sirfsoc_secondary_init(unsigned int cpu)
+ {
+@@ -36,8 +36,8 @@
+ /*
+  * Synchronise with the boot thread.
+  */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static const struct of_device_id clk_ids[]  = {
+@@ -75,7 +75,7 @@
+ /* make sure write buffer is drained */
+ mb();
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+  * The secondary processor is waiting to be released from
+@@ -107,7 +107,7 @@
+  * now the secondary core is starting up let it run its
+  * calibrations, then wait for it to finish
+  */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.9.28.orig/arch/arm/mach-qcom/platsmp.c linux-4.9.28/arch/arm/mach-qcom/platsmp.c
+--- linux-4.9.28.orig/arch/arm/mach-qcom/platsmp.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/mach-qcom/platsmp.c 2017-05-19 03:37:25.122174217 +0200
+@@ -46,7 +46,7 @@
+
+ extern void secondary_startup_arm(void);
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ static void qcom_cpu_die(unsigned int cpu)
+@@ -60,8 +60,8 @@
+ /*
+  * Synchronise with the boot thread.
+  */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int scss_release_secondary(unsigned int cpu)
+@@ -284,7 +284,7 @@
+  * set synchronisation state between this boot processor
+  * and the secondary one
+  */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+  * Send the secondary CPU a soft interrupt, thereby causing
+@@ -297,7 +297,7 @@
+  * now the secondary core is starting up let it run its
+  * calibrations, then wait for it to finish
+  */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return ret;
+ }
+diff -Nur linux-4.9.28.orig/arch/arm/mach-spear/platsmp.c linux-4.9.28/arch/arm/mach-spear/platsmp.c
+--- linux-4.9.28.orig/arch/arm/mach-spear/platsmp.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/mach-spear/platsmp.c 2017-05-19 03:37:25.122174217 +0200
+@@ -32,7 +32,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+
+@@ -47,8 +47,8 @@
+ /*
+  * Synchronise with the boot thread.
+  */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -59,7 +59,7 @@
+  * set synchronisation state between this boot processor
+  * and the secondary one
+  */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+  * The secondary processor is waiting to be released from
+@@ -84,7 +84,7 @@
+  * now the secondary core is starting up let it run its
+  * calibrations, then wait for it to finish
+  */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.9.28.orig/arch/arm/mach-sti/platsmp.c linux-4.9.28/arch/arm/mach-sti/platsmp.c
+--- linux-4.9.28.orig/arch/arm/mach-sti/platsmp.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/mach-sti/platsmp.c 2017-05-19 03:37:25.122174217 +0200
+@@ -35,7 +35,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void sti_secondary_init(unsigned int cpu)
+ {
+@@ -48,8 +48,8 @@
+ /*
+  * Synchronise with the boot thread.
+  */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -60,7 +60,7 @@
+  * set synchronisation state between this boot processor
+  * and the secondary one
+  */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+  * The secondary processor is waiting to be released from
+@@ -91,7 +91,7 @@
+  * now the secondary core is starting up let it run its
+  * calibrations, then wait for it to finish
+  */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.9.28.orig/arch/arm/mm/fault.c linux-4.9.28/arch/arm/mm/fault.c
+--- linux-4.9.28.orig/arch/arm/mm/fault.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/mm/fault.c 2017-05-19 03:37:25.122174217 +0200
+@@ -430,6 +430,9 @@
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, fsr, regs);
+
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ if (user_mode(regs))
+ goto bad_area;
+
+@@ -497,6 +500,9 @@
+ static int
+ do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ do_bad_area(addr, fsr, regs);
+ return 0;
+ }
+diff -Nur linux-4.9.28.orig/arch/arm/mm/highmem.c linux-4.9.28/arch/arm/mm/highmem.c
+--- linux-4.9.28.orig/arch/arm/mm/highmem.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/mm/highmem.c 2017-05-19 03:37:25.126174470 +0200
+@@ -34,6 +34,11 @@
+ return *ptep;
+ }
+
++static unsigned int fixmap_idx(int type)
++{
++ return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++}
++
+ void *kmap(struct page *page)
+ {
+ might_sleep();
+@@ -54,12 +59,13 @@
+
+ void *kmap_atomic(struct page *page)
+ {
++ pte_t pte = mk_pte(page, kmap_prot);
+ unsigned int idx;
+ unsigned long vaddr;
+ void *kmap;
+ int type;
+
+- preempt_disable();
++ preempt_disable_nort();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+@@ -79,7 +85,7 @@
+
+ type = kmap_atomic_idx_push();
+
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++ idx = fixmap_idx(type);
+ vaddr = __fix_to_virt(idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ /*
+@@ -93,7 +99,10 @@
+  * in place, so the contained TLB flush ensures the TLB is updated
+  * with the new mapping.
+  */
+- set_fixmap_pte(idx, mk_pte(page, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_fixmap_pte(idx, pte);
+
+ return (void *)vaddr;
+ }
+@@ -106,44 +115,75 @@
+
+ if (kvaddr >= (void *)FIXADDR_START) {
+ type = kmap_atomic_idx();
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++ idx = fixmap_idx(type);
+
+ if (cache_is_vivt())
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(vaddr != __fix_to_virt(idx));
+- set_fixmap_pte(idx, __pte(0));
+ #else
+ (void) idx;  /* to kill a warning */
+ #endif
++ set_fixmap_pte(idx, __pte(0));
+ kmap_atomic_idx_pop();
+ } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+ /* this address was obtained through kmap_high_get() */
+ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+ }
+ pagefault_enable();
+- preempt_enable();
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(__kunmap_atomic);
+
+ void *kmap_atomic_pfn(unsigned long pfn)
+ {
++ pte_t pte = pfn_pte(pfn, kmap_prot);
+ unsigned long vaddr;
+ int idx, type;
+ struct page *page = pfn_to_page(pfn);
+
+- preempt_disable();
++ preempt_disable_nort();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ type = kmap_atomic_idx_push();
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++ idx = fixmap_idx(type);
+ vaddr = __fix_to_virt(idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
+ #endif
+- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_fixmap_pte(idx, pte);
+
+ return (void *)vaddr;
+ }
++#if defined CONFIG_PREEMPT_RT_FULL
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ int i;
++
++ /*
++  * Clear @prev's kmap_atomic mappings
++  */
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = fixmap_idx(i);
++
++ set_fixmap_pte(idx, __pte(0));
++ }
++ /*
++  * Restore @next_p's kmap_atomic mappings
++  */
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = fixmap_idx(i);
++
++ if (!pte_none(next_p->kmap_pte[i]))
++ set_fixmap_pte(idx, next_p->kmap_pte[i]);
++ }
++}
++#endif
+diff -Nur linux-4.9.28.orig/arch/arm/plat-versatile/platsmp.c linux-4.9.28/arch/arm/plat-versatile/platsmp.c
+--- linux-4.9.28.orig/arch/arm/plat-versatile/platsmp.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm/plat-versatile/platsmp.c 2017-05-19 03:37:25.126174470 +0200
+@@ -32,7 +32,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void versatile_secondary_init(unsigned int cpu)
+ {
+@@ -45,8 +45,8 @@
+ /*
+  * Synchronise with the boot thread.
+  */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -57,7 +57,7 @@
+  * Set synchronisation state between this boot processor
+  * and the secondary one
+  */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+  * This is really belt and braces; we hold unintended secondary
+@@ -87,7 +87,7 @@
+  * now the secondary core is starting up let it run its
+  * calibrations, then wait for it to finish
+  */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.9.28.orig/arch/arm64/include/asm/thread_info.h linux-4.9.28/arch/arm64/include/asm/thread_info.h
+--- linux-4.9.28.orig/arch/arm64/include/asm/thread_info.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm64/include/asm/thread_info.h 2017-05-19 03:37:25.126174470 +0200
+@@ -49,6 +49,7 @@
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ int cpu; /* cpu */
+ };
+
+@@ -112,6 +113,7 @@
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+ #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
++#define TIF_NEED_RESCHED_LAZY 4
+ #define TIF_NOHZ 7
+ #define TIF_SYSCALL_TRACE 8
+ #define TIF_SYSCALL_AUDIT 9
+@@ -127,6 +129,7 @@
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+ #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_NOHZ (1 << TIF_NOHZ)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+@@ -135,7 +138,9 @@
+ #define _TIF_32BIT (1 << TIF_32BIT)
+
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
++ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
++ _TIF_NEED_RESCHED_LAZY)
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+diff -Nur linux-4.9.28.orig/arch/arm64/Kconfig linux-4.9.28/arch/arm64/Kconfig
+--- linux-4.9.28.orig/arch/arm64/Kconfig 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm64/Kconfig 2017-05-19 03:37:25.126174470 +0200
+@@ -91,6 +91,7 @@
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RCU_TABLE_FREE
+ select HAVE_SYSCALL_TRACEPOINTS
+@@ -704,7 +705,7 @@
+
+ config XEN
+ bool "Xen guest support on ARM64"
+- depends on ARM64 && OF
++ depends on ARM64 && OF && !PREEMPT_RT_FULL
+ select SWIOTLB_XEN
+ select PARAVIRT
+ help
+diff -Nur linux-4.9.28.orig/arch/arm64/kernel/asm-offsets.c linux-4.9.28/arch/arm64/kernel/asm-offsets.c
+--- linux-4.9.28.orig/arch/arm64/kernel/asm-offsets.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm64/kernel/asm-offsets.c 2017-05-19 03:37:25.126174470 +0200
+@@ -38,6 +38,7 @@
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+diff -Nur linux-4.9.28.orig/arch/arm64/kernel/entry.S linux-4.9.28/arch/arm64/kernel/entry.S
+--- linux-4.9.28.orig/arch/arm64/kernel/entry.S 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm64/kernel/entry.S 2017-05-19 03:37:25.126174470 +0200
+@@ -428,11 +428,16 @@
+
+ #ifdef CONFIG_PREEMPT
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+- cbnz w24, 1f // preempt count != 0
++ cbnz w24, 2f // preempt count != 0
+ ldr x0, [tsk, #TI_FLAGS] // get flags
+- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
+- bl el1_preempt
++ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
++
++ ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count
++ cbnz w24, 2f // preempt lazy count != 0
++ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
+ 1:
++ bl el1_preempt
++2:
+ #endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_on
+@@ -446,6 +451,7 @@
+ 1: bl preempt_schedule_irq // irq en/disable is done inside
+ ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
+ tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
++ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
+ ret x24
+ #endif
+
+diff -Nur linux-4.9.28.orig/arch/arm64/kernel/signal.c linux-4.9.28/arch/arm64/kernel/signal.c
+--- linux-4.9.28.orig/arch/arm64/kernel/signal.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/arm64/kernel/signal.c 2017-05-19 03:37:25.126174470 +0200
+@@ -409,7 +409,7 @@
+  */
+ trace_hardirqs_off();
+ do {
+- if (thread_flags & _TIF_NEED_RESCHED) {
++ if (thread_flags & _TIF_NEED_RESCHED_MASK) {
+ schedule();
+ } else {
+ local_irq_enable();
+diff -Nur linux-4.9.28.orig/arch/Kconfig linux-4.9.28/arch/Kconfig
+--- linux-4.9.28.orig/arch/Kconfig 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/Kconfig 2017-05-19 03:37:25.122174217 +0200
+@@ -9,6 +9,7 @@
+ tristate "OProfile system profiling"
+ depends on PROFILING
+ depends on HAVE_OPROFILE
++ depends on !PREEMPT_RT_FULL
+ select RING_BUFFER
+ select RING_BUFFER_ALLOW_SWAP
+ help
+@@ -52,6 +53,7 @@
+ config JUMP_LABEL
+ bool "Optimize very unlikely/likely branches"
+ depends on HAVE_ARCH_JUMP_LABEL
++ depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
+ help
+ This option enables a transparent branch optimization that
+ makes certain almost-always-true or almost-always-false branch
+diff -Nur linux-4.9.28.orig/arch/mips/Kconfig linux-4.9.28/arch/mips/Kconfig
+--- linux-4.9.28.orig/arch/mips/Kconfig 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/mips/Kconfig 2017-05-19 03:37:25.126174470 +0200
+@@ -2515,7 +2515,7 @@
+ #
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
+
+ config CPU_SUPPORTS_HIGHMEM
+ bool
+diff -Nur linux-4.9.28.orig/arch/powerpc/include/asm/thread_info.h linux-4.9.28/arch/powerpc/include/asm/thread_info.h
+--- linux-4.9.28.orig/arch/powerpc/include/asm/thread_info.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/powerpc/include/asm/thread_info.h 2017-05-19 03:37:25.126174470 +0200
+@@ -43,6 +43,8 @@
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => preemptable,
++ <0 => BUG */
+ unsigned long local_flags; /* private flags for thread */
+ #ifdef CONFIG_LIVEPATCH
+ unsigned long *livepatch_sp;
+@@ -88,8 +90,7 @@
+ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+ #define TIF_SIGPENDING 1 /* signal pending */
+ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+- TIF_NEED_RESCHED */
++#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
+ #define TIF_32BIT 4 /* 32 bit binary */
+ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+@@ -107,6 +108,8 @@
+ #if defined(CONFIG_PPC64)
+ #define TIF_ELF2ABI 18 /* function descriptors must die! */
+ #endif
++#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling
++ TIF_NEED_RESCHED */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+@@ -125,14 +128,16 @@
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ (1<<TIF_NOHZ)
++#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+- _TIF_RESTORE_TM)
++ _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
+ #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+diff -Nur linux-4.9.28.orig/arch/powerpc/Kconfig linux-4.9.28/arch/powerpc/Kconfig
+--- linux-4.9.28.orig/arch/powerpc/Kconfig 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/powerpc/Kconfig 2017-05-19 03:37:25.126174470 +0200
+@@ -52,10 +52,11 @@
+
+ config RWSEM_GENERIC_SPINLOCK
+ bool
++ default y if PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
+
+ config GENERIC_LOCKBREAK
+ bool
+@@ -134,6 +135,7 @@
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
++ select HAVE_PREEMPT_LAZY
+ select HAVE_MOD_ARCH_SPECIFIC
+ select MODULES_USE_ELF_RELA
+ select CLONE_BACKWARDS
+@@ -321,7 +323,7 @@
+
+ config HIGHMEM
+ bool "High memory support"
+- depends on PPC32
++ depends on PPC32 && !PREEMPT_RT_FULL
+
+ source kernel/Kconfig.hz
+ source kernel/Kconfig.preempt
+diff -Nur linux-4.9.28.orig/arch/powerpc/kernel/asm-offsets.c linux-4.9.28/arch/powerpc/kernel/asm-offsets.c
+--- linux-4.9.28.orig/arch/powerpc/kernel/asm-offsets.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/powerpc/kernel/asm-offsets.c 2017-05-19 03:37:25.126174470 +0200
+@@ -156,6 +156,7 @@
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+
+diff -Nur linux-4.9.28.orig/arch/powerpc/kernel/entry_32.S linux-4.9.28/arch/powerpc/kernel/entry_32.S
+--- linux-4.9.28.orig/arch/powerpc/kernel/entry_32.S 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/powerpc/kernel/entry_32.S 2017-05-19 03:37:25.126174470 +0200
+@@ -835,7 +835,14 @@
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore
+ andi. r8,r8,_TIF_NEED_RESCHED
++ bne+ 1f
++ lwz r0,TI_PREEMPT_LAZY(r9)
++ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
++ bne restore
++ lwz r0,TI_FLAGS(r9)
++ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++1:
+ lwz r3,_MSR(r1)
+ andi. r0,r3,MSR_EE /* interrupts off? */
+ beq restore /* don't schedule if so */
+@@ -846,11 +853,11 @@
+  */
+ bl trace_hardirqs_off
+ #endif
+-1: bl preempt_schedule_irq
++2: bl preempt_schedule_irq
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r3,TI_FLAGS(r9)
+- andi. r0,r3,_TIF_NEED_RESCHED
+- bne- 1b
++ andi. r0,r3,_TIF_NEED_RESCHED_MASK
++ bne- 2b
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+  * are being turned back on, which will happen when we return
+@@ -1171,7 +1178,7 @@
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+ do_work: /* r10 contains MSR_KERNEL here */
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ beq do_user_signal
+
+ do_resched: /* r10 contains MSR_KERNEL here */
+@@ -1192,7 +1199,7 @@
+ MTMSRD(r10) /* disable interrupts */
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r9,TI_FLAGS(r9)
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ bne- do_resched
+ andi. r0,r9,_TIF_USER_WORK_MASK
+ beq restore_user
+diff -Nur linux-4.9.28.orig/arch/powerpc/kernel/entry_64.S linux-4.9.28/arch/powerpc/kernel/entry_64.S
+--- linux-4.9.28.orig/arch/powerpc/kernel/entry_64.S 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/powerpc/kernel/entry_64.S 2017-05-19 03:37:25.126174470 +0200
+@@ -656,7 +656,7 @@
+ bl restore_math
+ b restore
+ #endif
+-1: andi. r0,r4,_TIF_NEED_RESCHED
++1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ beq 2f
+ bl restore_interrupts
+ SCHEDULE_USER
+@@ -718,10 +718,18 @@
+
+ #ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
++ lwz r8,TI_PREEMPT(r9)
++ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
++ bne restore
+ andi. r0,r4,_TIF_NEED_RESCHED
++ bne+ check_count
++
++ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++ lwz r8,TI_PREEMPT_LAZY(r9)
++
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+- lwz r8,TI_PREEMPT(r9)
++check_count:
+ cmpwi cr1,r8,0
+ ld r0,SOFTE(r1)
+ cmpdi r0,0
+@@ -738,7 +746,7 @@
+ /* Re-test flags and eventually loop */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r4,TI_FLAGS(r9)
+- andi. r0,r4,_TIF_NEED_RESCHED
++ andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ bne 1b
+
+ /*
+diff -Nur linux-4.9.28.orig/arch/powerpc/kernel/irq.c linux-4.9.28/arch/powerpc/kernel/irq.c
+--- linux-4.9.28.orig/arch/powerpc/kernel/irq.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/powerpc/kernel/irq.c 2017-05-19 03:37:25.126174470 +0200
+@@ -638,6 +638,7 @@
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curtp, *irqtp;
+@@ -655,6 +656,7 @@
+ if (irqtp->flags)
+ set_bits(irqtp->flags, &curtp->flags);
+ }
++#endif
+
+ irq_hw_number_t virq_to_hw(unsigned int virq)
+ {
+diff -Nur linux-4.9.28.orig/arch/powerpc/kernel/misc_32.S linux-4.9.28/arch/powerpc/kernel/misc_32.S
+--- linux-4.9.28.orig/arch/powerpc/kernel/misc_32.S 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/powerpc/kernel/misc_32.S 2017-05-19 03:37:25.126174470 +0200
+@@ -41,6 +41,7 @@
+  * We store the saved ksp_limit in the unused part
+  * of the STACK_FRAME_OVERHEAD
+  */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ stw r0,4(r1)
+@@ -57,6 +58,7 @@
+ stw r10,THREAD+KSP_LIMIT(r2)
+ mtlr r0
+ blr
++#endif
+
+ /*
+  * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+diff -Nur linux-4.9.28.orig/arch/powerpc/kernel/misc_64.S linux-4.9.28/arch/powerpc/kernel/misc_64.S
+--- linux-4.9.28.orig/arch/powerpc/kernel/misc_64.S 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/powerpc/kernel/misc_64.S 2017-05-19 03:37:25.126174470 +0200
+@@ -31,6 +31,7 @@
+
+ .text
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ std r0,16(r1)
+@@ -41,6 +42,7 @@
+ ld r0,16(r1)
+ mtlr r0
+ blr
++#endif
+
+ _GLOBAL(call_do_irq)
+ mflr r0
+diff -Nur linux-4.9.28.orig/arch/powerpc/kvm/Kconfig linux-4.9.28/arch/powerpc/kvm/Kconfig
+--- linux-4.9.28.orig/arch/powerpc/kvm/Kconfig 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/powerpc/kvm/Kconfig 2017-05-19 03:37:25.126174470 +0200
+@@ -175,6 +175,7 @@
+ config KVM_MPIC
+ bool "KVM in-kernel MPIC emulation"
+ depends on KVM && E500
++ depends on !PREEMPT_RT_FULL
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQFD
+ select HAVE_KVM_IRQ_ROUTING
+diff -Nur linux-4.9.28.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.9.28/arch/powerpc/platforms/ps3/device-init.c
+--- linux-4.9.28.orig/arch/powerpc/platforms/ps3/device-init.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/powerpc/platforms/ps3/device-init.c 2017-05-19 03:37:25.126174470 +0200
+@@ -752,7 +752,7 @@
+ }
+ pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
+
+- res = wait_event_interruptible(dev->done.wait,
++ res = swait_event_interruptible(dev->done.wait,
+ dev->done.done || kthread_should_stop());
+ if (kthread_should_stop())
+ res = -EINTR;
+diff -Nur linux-4.9.28.orig/arch/sh/kernel/irq.c linux-4.9.28/arch/sh/kernel/irq.c
+--- linux-4.9.28.orig/arch/sh/kernel/irq.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/sh/kernel/irq.c 2017-05-19 03:37:25.126174470 +0200
+@@ -147,6 +147,7 @@
+ hardirq_ctx[cpu] = NULL;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curctx;
+@@ -174,6 +175,7 @@
+ "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+ );
+ }
++#endif
+ #else
+ static inline void handle_one_irq(unsigned int irq)
+ {
+diff -Nur linux-4.9.28.orig/arch/sparc/Kconfig linux-4.9.28/arch/sparc/Kconfig
+--- linux-4.9.28.orig/arch/sparc/Kconfig 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/sparc/Kconfig 2017-05-19 03:37:25.126174470 +0200
+@@ -194,12 +194,10 @@
+ source kernel/Kconfig.hz
+
+ config RWSEM_GENERIC_SPINLOCK
+- bool
+- default y if SPARC32
++ def_bool PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+- bool
+- default y if SPARC64
++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+
+ config GENERIC_HWEIGHT
+ bool
+diff -Nur linux-4.9.28.orig/arch/sparc/kernel/irq_64.c linux-4.9.28/arch/sparc/kernel/irq_64.c
+--- linux-4.9.28.orig/arch/sparc/kernel/irq_64.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/sparc/kernel/irq_64.c 2017-05-19 03:37:25.126174470 +0200
+@@ -854,6 +854,7 @@
+ set_irq_regs(old_regs);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+@@ -868,6 +869,7 @@
+ __asm__ __volatile__("mov %0, %%sp"
+ : : "r" (orig_sp));
+ }
++#endif
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ void fixup_irqs(void)
+diff -Nur linux-4.9.28.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.9.28/arch/x86/crypto/aesni-intel_glue.c
+--- linux-4.9.28.orig/arch/x86/crypto/aesni-intel_glue.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/crypto/aesni-intel_glue.c 2017-05-19 03:37:25.130174719 +0200
+@@ -372,14 +372,14 @@
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+- nbytes & AES_BLOCK_MASK);
++ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -396,14 +396,14 @@
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -420,14 +420,14 @@
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -444,14 +444,14 @@
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -503,18 +503,20 @@
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
++ kernel_fpu_begin();
+ aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+ if (walk.nbytes) {
++ kernel_fpu_begin();
+ ctr_crypt_final(ctx, &walk);
++ kernel_fpu_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+diff -Nur linux-4.9.28.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.9.28/arch/x86/crypto/cast5_avx_glue.c
+--- linux-4.9.28.orig/arch/x86/crypto/cast5_avx_glue.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/crypto/cast5_avx_glue.c 2017-05-19 03:37:25.130174719 +0200
+@@ -59,7 +59,7 @@
+ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ bool enc)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ const unsigned int bsize = CAST5_BLOCK_SIZE;
+ unsigned int nbytes;
+@@ -75,7 +75,7 @@
+ u8 *wsrc = walk->src.virt.addr;
+ u8 *wdst = walk->dst.virt.addr;
+
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+
+ /* Process multi-block batch */
+ if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
+@@ -103,10 +103,9 @@
+ } while (nbytes >= bsize);
+
+ done:
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, walk, nbytes);
+ }
+-
+- cast5_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -227,7 +226,7 @@
+ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -236,12 +235,11 @@
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ while ((nbytes = walk.nbytes)) {
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+ nbytes = __cbc_decrypt(desc, &walk);
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+-
+- cast5_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -311,7 +309,7 @@
+ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -320,13 +318,12 @@
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+ nbytes = __ctr_crypt(desc, &walk);
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- cast5_fpu_end(fpu_enabled);
+-
+ if (walk.nbytes) {
+ ctr_crypt_final(desc, &walk);
+ err = blkcipher_walk_done(desc, &walk, 0);
+diff -Nur linux-4.9.28.orig/arch/x86/crypto/glue_helper.c linux-4.9.28/arch/x86/crypto/glue_helper.c
+--- linux-4.9.28.orig/arch/x86/crypto/glue_helper.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/crypto/glue_helper.c 2017-05-19 03:37:25.130174719 +0200
+@@ -39,7 +39,7 @@
+ void *ctx = crypto_blkcipher_ctx(desc->tfm);
+ const unsigned int bsize = 128 / 8;
+ unsigned int nbytes, i, func_bytes;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ int err;
+
+ err = blkcipher_walk_virt(desc, walk);
+@@ -49,7 +49,7 @@
+ u8 *wdst = walk->dst.virt.addr;
+
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+
+ for (i = 0; i < gctx->num_funcs; i++) {
+ func_bytes = bsize * gctx->funcs[i].num_blocks;
+@@ -71,10 +71,10 @@
+ }
+
+ done:
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -194,7 +194,7 @@
+ struct scatterlist *src, unsigned int nbytes)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -203,12 +203,12 @@
+
+ while ((nbytes = walk.nbytes)) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+ nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
+@@ -277,7 +277,7 @@
+ struct scatterlist *src, unsigned int nbytes)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -286,13 +286,12 @@
+
+ while ((nbytes = walk.nbytes) >= bsize) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+ nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+-
+ if (walk.nbytes) {
+ glue_ctr_crypt_final_128bit(
+ gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
+@@ -347,7 +346,7 @@
+ void *tweak_ctx, void *crypt_ctx)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -360,21 +359,21 @@
+
+ /* set minimum length to bsize, for tweak_fn */
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled,
++ desc, false,
+ nbytes < bsize ? bsize : nbytes);
+-
+ /* calculate first value of T */
+ tweak_fn(tweak_ctx, walk.iv, walk.iv);
++ glue_fpu_end(fpu_enabled);
+
+ while (nbytes) {
++ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
++ desc, false, nbytes);
+ nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
+
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = walk.nbytes;
+ }
+-
+- glue_fpu_end(fpu_enabled);
+-
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
+diff -Nur linux-4.9.28.orig/arch/x86/entry/common.c linux-4.9.28/arch/x86/entry/common.c
+--- linux-4.9.28.orig/arch/x86/entry/common.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/entry/common.c 2017-05-19 03:37:25.130174719 +0200
+@@ -129,7 +129,7 @@
+
+ #define EXIT_TO_USERMODE_LOOP_FLAGS \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
++ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY)
+
+ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+ {
+@@ -145,9 +145,16 @@
+ /* We have work to do. */
+ local_irq_enable();
+
+- if (cached_flags & _TIF_NEED_RESCHED)
++ if (cached_flags & _TIF_NEED_RESCHED_MASK)
+ schedule();
+
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (unlikely(current->forced_info.si_signo)) {
++ struct task_struct *t = current;
++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
++ t->forced_info.si_signo = 0;
++ }
++#endif
+ if (cached_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+diff -Nur linux-4.9.28.orig/arch/x86/entry/entry_32.S linux-4.9.28/arch/x86/entry/entry_32.S
+--- linux-4.9.28.orig/arch/x86/entry/entry_32.S 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/entry/entry_32.S 2017-05-19 03:37:25.130174719 +0200
+@@ -308,8 +308,25 @@
+ ENTRY(resume_kernel)
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ need_resched:
++ # preempt count == 0 + NEED_RS set?
+ cmpl $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz restore_all
++#else
++ jz test_int_off
++
++ # at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jne restore_all
++
++ movl PER_CPU_VAR(current_task), %ebp
++ cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
++ jnz restore_all
++
++ testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
++ jz restore_all
++test_int_off:
++#endif
+ testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ jz restore_all
+ call preempt_schedule_irq
+diff -Nur linux-4.9.28.orig/arch/x86/entry/entry_64.S linux-4.9.28/arch/x86/entry/entry_64.S
+--- linux-4.9.28.orig/arch/x86/entry/entry_64.S 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/entry/entry_64.S 2017-05-19 03:37:25.130174719 +0200
+@@ -546,7 +546,23 @@
+ bt $9, EFLAGS(%rsp) /* were interrupts off? */
+ jnc 1f
+ 0: cmpl $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz 1f
++#else
++ jz do_preempt_schedule_irq
++
++ # at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jnz 1f
++
++ movq PER_CPU_VAR(current_task), %rcx
++ cmpl $0, TASK_TI_preempt_lazy_count(%rcx)
++ jnz 1f
++
++ bt $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
++ jnc 1f
++do_preempt_schedule_irq:
++#endif
+ call preempt_schedule_irq
+ jmp 0b
+ 1:
+@@ -894,6 +910,7 @@
+ jmp 2b
+ .previous
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(do_softirq_own_stack)
+ pushq %rbp
+@@ -906,6 +923,7 @@
+ decl PER_CPU_VAR(irq_count)
+ ret
+ END(do_softirq_own_stack)
++#endif
+
+ #ifdef CONFIG_XEN
+ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+diff -Nur linux-4.9.28.orig/arch/x86/include/asm/preempt.h linux-4.9.28/arch/x86/include/asm/preempt.h
+--- linux-4.9.28.orig/arch/x86/include/asm/preempt.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/include/asm/preempt.h 2017-05-19 03:37:25.130174719 +0200
+@@ -79,17 +79,46 @@
+  * a decrement which hits zero means we have no preempt_count and should
+  * reschedule.
+  */
+-static __always_inline bool __preempt_count_dec_and_test(void)
++static __always_inline bool ____preempt_count_dec_and_test(void)
+ {
+ GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+ }
+
++static __always_inline bool __preempt_count_dec_and_test(void)
++{
++ if (____preempt_count_dec_and_test())
++ return true;
++#ifdef CONFIG_PREEMPT_LAZY
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++ return false;
++#endif
++}
++
+ /*
+  * Returns true when we need to resched and can (barring IRQ state).
+  */
+ static __always_inline bool should_resched(int preempt_offset)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++ u32 tmp;
++
++ tmp = raw_cpu_read_4(__preempt_count);
++ if (tmp == preempt_offset)
++ return true;
++
++ /* preempt count == 0 ? */
++ tmp &= ~PREEMPT_NEED_RESCHED;
++ if (tmp)
++ return false;
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
++#endif
+ }
+
+ #ifdef CONFIG_PREEMPT
+diff -Nur linux-4.9.28.orig/arch/x86/include/asm/signal.h linux-4.9.28/arch/x86/include/asm/signal.h
+--- linux-4.9.28.orig/arch/x86/include/asm/signal.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/include/asm/signal.h 2017-05-19 03:37:25.130174719 +0200
+@@ -27,6 +27,19 @@
+ #define SA_IA32_ABI 0x02000000u
+ #define SA_X32_ABI 0x01000000u
+
++/*
++ * Because some traps use the IST stack, we must keep preemption
++ * disabled while calling do_trap(), but do_trap() may call
++ * force_sig_info() which will grab the signal spin_locks for the
++ * task, which in PREEMPT_RT_FULL are mutexes. By defining
++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
++ * trap.
++ */
++#if defined(CONFIG_PREEMPT_RT_FULL)
++#define ARCH_RT_DELAYS_SIGNAL_SEND
++#endif
++
+ #ifndef CONFIG_COMPAT
+ typedef sigset_t compat_sigset_t;
+ #endif
+diff -Nur linux-4.9.28.orig/arch/x86/include/asm/stackprotector.h linux-4.9.28/arch/x86/include/asm/stackprotector.h
+--- linux-4.9.28.orig/arch/x86/include/asm/stackprotector.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/include/asm/stackprotector.h 2017-05-19 03:37:25.130174719 +0200
+@@ -59,7 +59,7 @@
+  */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+- u64 canary;
++ u64 uninitialized_var(canary);
+ u64 tsc;
+
+ #ifdef CONFIG_X86_64
+@@ -70,8 +70,15 @@
+  * of randomness. The TSC only matters for very early init,
+  * there it already has some randomness on most systems. Later
+  * on during the bootup the random pool has true entropy too.
++ *
++ * For preempt-rt we need to weaken the randomness a bit, as
++ * we can't call into the random generator from atomic context
++ * due to locking constraints. We just leave canary
++ * uninitialized and use the TSC based randomness on top of it.
+  */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ get_random_bytes(&canary, sizeof(canary));
++#endif
+ tsc = rdtsc();
+ canary += tsc + (tsc << 32UL);
+
+diff -Nur linux-4.9.28.orig/arch/x86/include/asm/thread_info.h linux-4.9.28/arch/x86/include/asm/thread_info.h
+--- linux-4.9.28.orig/arch/x86/include/asm/thread_info.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/include/asm/thread_info.h 2017-05-19 03:37:25.130174719 +0200
+@@ -54,11 +54,14 @@
+
+ struct thread_info {
+ unsigned long flags; /* low level flags */
++ int preempt_lazy_count; /* 0 => lazy preemptable
++ <0 => BUG */
+ };
+
+ #define INIT_THREAD_INFO(tsk) \
+ { \
+ .flags = 0, \
++ .preempt_lazy_count = 0, \
+ }
+
+ #define init_stack (init_thread_union.stack)
+@@ -67,6 +70,10 @@
+
+ #include <asm/asm-offsets.h>
+
++#define GET_THREAD_INFO(reg) \
++ _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
++ _ASM_SUB $(THREAD_SIZE),reg ;
++
+ #endif
+
+ /*
+@@ -85,6 +92,7 @@
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
+ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+ #define TIF_UPROBE 12 /* breakpointed or singlestepping */
+ #define TIF_NOTSC 16 /* TSC is not accessible in userland */
+@@ -108,6 +116,7 @@
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_NOTSC (1 << TIF_NOTSC)
+@@ -143,6 +152,8 @@
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
++
+ #define STACK_WARN (THREAD_SIZE/8)
+
+ /*
+diff -Nur linux-4.9.28.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.9.28/arch/x86/include/asm/uv/uv_bau.h
+--- linux-4.9.28.orig/arch/x86/include/asm/uv/uv_bau.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/arch/x86/include/asm/uv/uv_bau.h 2017-05-19 03:37:25.130174719 +0200
+@@ -624,9 +624,9 @@
+ cycles_t send_message;
+ cycles_t period_end;
+ cycles_t period_time;
+- spinlock_t uvhub_lock;
+- spinlock_t queue_lock;
+- spinlock_t disable_lock;
++ raw_spinlock_t uvhub_lock;
++ raw_spinlock_t queue_lock;
++ raw_spinlock_t disable_lock;
+ /* tunables */
+ int max_concurr;
+ int max_concurr_const;
+@@ -815,15 +815,15 @@
+  * to be lowered below the current 'v'. atomic_add_unless can only stop
+  * on equal.
+ */ +-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) ++static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u) + { +- spin_lock(lock); ++ raw_spin_lock(lock); + if (atomic_read(v) >= u) { +- spin_unlock(lock); ++ raw_spin_unlock(lock); + return 0; + } + atomic_inc(v); +- spin_unlock(lock); ++ raw_spin_unlock(lock); + return 1; + } + +diff -Nur linux-4.9.28.orig/arch/x86/Kconfig linux-4.9.28/arch/x86/Kconfig +--- linux-4.9.28.orig/arch/x86/Kconfig 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/Kconfig 2017-05-19 03:37:25.130174719 +0200 +@@ -17,6 +17,7 @@ + ### Arch settings + config X86 + def_bool y ++ select HAVE_PREEMPT_LAZY + select ACPI_LEGACY_TABLES_LOOKUP if ACPI + select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI + select ANON_INODES +@@ -232,8 +233,11 @@ + def_bool y + depends on ISA_DMA_API + ++config RWSEM_GENERIC_SPINLOCK ++ def_bool PREEMPT_RT_FULL ++ + config RWSEM_XCHGADD_ALGORITHM +- def_bool y ++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL + + config GENERIC_CALIBRATE_DELAY + def_bool y +@@ -897,7 +901,7 @@ + config MAXSMP + bool "Enable Maximum number of SMP Processors and NUMA Nodes" + depends on X86_64 && SMP && DEBUG_KERNEL +- select CPUMASK_OFFSTACK ++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL + ---help--- + Enable maximum number of CPUS and NUMA Nodes for this architecture. + If unsure, say N. +diff -Nur linux-4.9.28.orig/arch/x86/kernel/acpi/boot.c linux-4.9.28/arch/x86/kernel/acpi/boot.c +--- linux-4.9.28.orig/arch/x86/kernel/acpi/boot.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/kernel/acpi/boot.c 2017-05-19 03:37:25.130174719 +0200 +@@ -87,7 +87,9 @@ + * ->ioapic_mutex + * ->ioapic_lock + */ ++#ifdef CONFIG_X86_IO_APIC + static DEFINE_MUTEX(acpi_ioapic_lock); ++#endif + + /* -------------------------------------------------------------------------- + Boot-time Configuration +diff -Nur linux-4.9.28.orig/arch/x86/kernel/apic/io_apic.c linux-4.9.28/arch/x86/kernel/apic/io_apic.c +--- linux-4.9.28.orig/arch/x86/kernel/apic/io_apic.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/kernel/apic/io_apic.c 2017-05-19 03:37:25.130174719 +0200 +@@ -1712,7 +1712,8 @@ + static inline bool ioapic_irqd_mask(struct irq_data *data) + { + /* If we are moving the irq we need to mask it */ +- if (unlikely(irqd_is_setaffinity_pending(data))) { ++ if (unlikely(irqd_is_setaffinity_pending(data) && ++ !irqd_irq_inprogress(data))) { + mask_ioapic_irq(data); + return true; + } +diff -Nur linux-4.9.28.orig/arch/x86/kernel/asm-offsets.c linux-4.9.28/arch/x86/kernel/asm-offsets.c +--- linux-4.9.28.orig/arch/x86/kernel/asm-offsets.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/kernel/asm-offsets.c 2017-05-19 03:37:25.130174719 +0200 +@@ -36,6 +36,7 @@ + + BLANK(); + OFFSET(TASK_TI_flags, task_struct, thread_info.flags); ++ OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count); + OFFSET(TASK_addr_limit, task_struct, thread.addr_limit); + + BLANK(); +@@ -91,4 +92,5 @@ + + BLANK(); + DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); ++ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); + } +diff -Nur linux-4.9.28.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.9.28/arch/x86/kernel/cpu/mcheck/mce.c +--- linux-4.9.28.orig/arch/x86/kernel/cpu/mcheck/mce.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/kernel/cpu/mcheck/mce.c 2017-05-19 03:37:25.130174719 +0200 +@@ -41,6 +41,8 @@ + #include <linux/debugfs.h> + #include <linux/irq_work.h> + 
#include <linux/export.h> ++#include <linux/jiffies.h> ++#include <linux/swork.h> + #include <linux/jump_label.h> + + #include <asm/processor.h> +@@ -1307,7 +1309,7 @@ + static unsigned long check_interval = INITIAL_CHECK_INTERVAL; + + static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ +-static DEFINE_PER_CPU(struct timer_list, mce_timer); ++static DEFINE_PER_CPU(struct hrtimer, mce_timer); + + static unsigned long mce_adjust_timer_default(unsigned long interval) + { +@@ -1316,32 +1318,18 @@ + + static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; + +-static void __restart_timer(struct timer_list *t, unsigned long interval) ++static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval) + { +- unsigned long when = jiffies + interval; +- unsigned long flags; +- +- local_irq_save(flags); +- +- if (timer_pending(t)) { +- if (time_before(when, t->expires)) +- mod_timer(t, when); +- } else { +- t->expires = round_jiffies(when); +- add_timer_on(t, smp_processor_id()); +- } +- +- local_irq_restore(flags); ++ if (!interval) ++ return HRTIMER_NORESTART; ++ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval))); ++ return HRTIMER_RESTART; + } + +-static void mce_timer_fn(unsigned long data) ++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) + { +- struct timer_list *t = this_cpu_ptr(&mce_timer); +- int cpu = smp_processor_id(); + unsigned long iv; + +- WARN_ON(cpu != data); +- + iv = __this_cpu_read(mce_next_interval); + + if (mce_available(this_cpu_ptr(&cpu_info))) { +@@ -1364,7 +1352,7 @@ + + done: + __this_cpu_write(mce_next_interval, iv); +- __restart_timer(t, iv); ++ return __restart_timer(timer, iv); + } + + /* +@@ -1372,7 +1360,7 @@ + */ + void mce_timer_kick(unsigned long interval) + { +- struct timer_list *t = this_cpu_ptr(&mce_timer); ++ struct hrtimer *t = this_cpu_ptr(&mce_timer); + unsigned long iv = __this_cpu_read(mce_next_interval); + + __restart_timer(t, interval); +@@ -1387,7 +1375,7 @@ + int cpu; + + for_each_online_cpu(cpu) +- del_timer_sync(&per_cpu(mce_timer, cpu)); ++ hrtimer_cancel(&per_cpu(mce_timer, cpu)); + } + + static void mce_do_trigger(struct work_struct *work) +@@ -1397,6 +1385,56 @@ + + static DECLARE_WORK(mce_trigger_work, mce_do_trigger); + ++static void __mce_notify_work(struct swork_event *event) ++{ ++ /* Not more than two messages every minute */ ++ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); ++ ++ /* wake processes polling /dev/mcelog */ ++ wake_up_interruptible(&mce_chrdev_wait); ++ ++ /* ++ * There is no risk of missing notifications because ++ * work_pending is always cleared before the function is ++ * executed. ++ */ ++ if (mce_helper[0] && !work_pending(&mce_trigger_work)) ++ schedule_work(&mce_trigger_work); ++ ++ if (__ratelimit(&ratelimit)) ++ pr_info(HW_ERR "Machine check events logged\n"); ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static bool notify_work_ready __read_mostly; ++static struct swork_event notify_work; ++ ++static int mce_notify_work_init(void) ++{ ++ int err; ++ ++ err = swork_get(); ++ if (err) ++ return err; ++ ++ INIT_SWORK(¬ify_work, __mce_notify_work); ++ notify_work_ready = true; ++ return 0; ++} ++ ++static void mce_notify_work(void) ++{ ++ if (notify_work_ready) ++ swork_queue(¬ify_work); ++} ++#else ++static void mce_notify_work(void) ++{ ++ __mce_notify_work(NULL); ++} ++static inline int mce_notify_work_init(void) { return 0; } ++#endif ++ + /* + * Notify the user(s) about new machine check events. 
+ * Can be called from interrupt context, but not from machine check/NMI +@@ -1404,19 +1442,8 @@ + */ + int mce_notify_irq(void) + { +- /* Not more than two messages every minute */ +- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); +- + if (test_and_clear_bit(0, &mce_need_notify)) { +- /* wake processes polling /dev/mcelog */ +- wake_up_interruptible(&mce_chrdev_wait); +- +- if (mce_helper[0]) +- schedule_work(&mce_trigger_work); +- +- if (__ratelimit(&ratelimit)) +- pr_info(HW_ERR "Machine check events logged\n"); +- ++ mce_notify_work(); + return 1; + } + return 0; +@@ -1722,7 +1749,7 @@ + } + } + +-static void mce_start_timer(unsigned int cpu, struct timer_list *t) ++static void mce_start_timer(unsigned int cpu, struct hrtimer *t) + { + unsigned long iv = check_interval * HZ; + +@@ -1731,16 +1758,17 @@ + + per_cpu(mce_next_interval, cpu) = iv; + +- t->expires = round_jiffies(jiffies + iv); +- add_timer_on(t, cpu); ++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL), ++ 0, HRTIMER_MODE_REL_PINNED); + } + + static void __mcheck_cpu_init_timer(void) + { +- struct timer_list *t = this_cpu_ptr(&mce_timer); ++ struct hrtimer *t = this_cpu_ptr(&mce_timer); + unsigned int cpu = smp_processor_id(); + +- setup_pinned_timer(t, mce_timer_fn, cpu); ++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ t->function = mce_timer_fn; + mce_start_timer(cpu, t); + } + +@@ -2465,6 +2493,8 @@ + if (!mce_available(raw_cpu_ptr(&cpu_info))) + return; + ++ hrtimer_cancel(this_cpu_ptr(&mce_timer)); ++ + if (!(action & CPU_TASKS_FROZEN)) + cmci_clear(); + +@@ -2487,6 +2517,7 @@ + if (b->init) + wrmsrl(msr_ops.ctl(i), b->ctl); + } ++ __mcheck_cpu_init_timer(); + } + + /* Get notified when a cpu comes on/off. Be hotplug friendly. */ +@@ -2494,7 +2525,6 @@ + mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) + { + unsigned int cpu = (unsigned long)hcpu; +- struct timer_list *t = &per_cpu(mce_timer, cpu); + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: +@@ -2514,11 +2544,9 @@ + break; + case CPU_DOWN_PREPARE: + smp_call_function_single(cpu, mce_disable_cpu, &action, 1); +- del_timer_sync(t); + break; + case CPU_DOWN_FAILED: + smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); +- mce_start_timer(cpu, t); + break; + } + +@@ -2557,6 +2585,10 @@ + goto err_out; + } + ++ err = mce_notify_work_init(); ++ if (err) ++ goto err_out; ++ + if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { + err = -ENOMEM; + goto err_out; +diff -Nur linux-4.9.28.orig/arch/x86/kernel/irq_32.c linux-4.9.28/arch/x86/kernel/irq_32.c +--- linux-4.9.28.orig/arch/x86/kernel/irq_32.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/kernel/irq_32.c 2017-05-19 03:37:25.130174719 +0200 +@@ -127,6 +127,7 @@ + cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + struct irq_stack *irqstk; +@@ -143,6 +144,7 @@ + + call_on_stack(__do_softirq, isp); + } ++#endif + + bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) + { +diff -Nur linux-4.9.28.orig/arch/x86/kernel/process_32.c linux-4.9.28/arch/x86/kernel/process_32.c +--- linux-4.9.28.orig/arch/x86/kernel/process_32.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/kernel/process_32.c 2017-05-19 03:37:25.130174719 +0200 +@@ -35,6 +35,7 @@ + #include <linux/uaccess.h> + #include <linux/io.h> + #include <linux/kdebug.h> ++#include <linux/highmem.h> + + #include <asm/pgtable.h> + #include 
<asm/ldt.h> +@@ -195,6 +196,35 @@ + } + EXPORT_SYMBOL_GPL(start_thread); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ pte_t *ptep = kmap_pte - idx; ++ ++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); ++ } ++} ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ + + /* + * switch_to(x,y) should switch tasks from x to y. +@@ -271,6 +301,8 @@ + task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) + __switch_to_xtra(prev_p, next_p, tss); + ++ switch_kmaps(prev_p, next_p); ++ + /* + * Leave lazy mode, flushing any hypercalls made here. + * This must be done before restoring TLS segments so +diff -Nur linux-4.9.28.orig/arch/x86/kvm/lapic.c linux-4.9.28/arch/x86/kvm/lapic.c +--- linux-4.9.28.orig/arch/x86/kvm/lapic.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/kvm/lapic.c 2017-05-19 03:37:25.134174924 +0200 +@@ -1939,6 +1939,7 @@ + hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS_PINNED); + apic->lapic_timer.timer.function = apic_timer_fn; ++ apic->lapic_timer.timer.irqsafe = 1; + + /* + * APIC is created enabled. This will prevent kvm_lapic_set_base from +diff -Nur linux-4.9.28.orig/arch/x86/kvm/x86.c linux-4.9.28/arch/x86/kvm/x86.c +--- linux-4.9.28.orig/arch/x86/kvm/x86.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/kvm/x86.c 2017-05-19 03:37:25.134174924 +0200 +@@ -5933,6 +5933,13 @@ + goto out; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { ++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); ++ return -EOPNOTSUPP; ++ } ++#endif ++ + r = kvm_mmu_module_init(); + if (r) + goto out_free_percpu; +diff -Nur linux-4.9.28.orig/arch/x86/mm/highmem_32.c linux-4.9.28/arch/x86/mm/highmem_32.c +--- linux-4.9.28.orig/arch/x86/mm/highmem_32.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/mm/highmem_32.c 2017-05-19 03:37:25.134174924 +0200 +@@ -32,10 +32,11 @@ + */ + void *kmap_atomic_prot(struct page *page, pgprot_t prot) + { ++ pte_t pte = mk_pte(page, prot); + unsigned long vaddr; + int idx, type; + +- preempt_disable(); ++ preempt_disable_nort(); + pagefault_disable(); + + if (!PageHighMem(page)) +@@ -45,7 +46,10 @@ + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); +- set_pte(kmap_pte-idx, mk_pte(page, prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte-idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -88,6 +92,9 @@ + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + arch_flush_lazy_mmu_mode(); +@@ -100,7 +107,7 @@ + #endif + + pagefault_enable(); +- preempt_enable(); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(__kunmap_atomic); + +diff -Nur linux-4.9.28.orig/arch/x86/mm/iomap_32.c linux-4.9.28/arch/x86/mm/iomap_32.c +--- linux-4.9.28.orig/arch/x86/mm/iomap_32.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/mm/iomap_32.c 2017-05-19 03:37:25.134174924 +0200 +@@ -56,6 +56,7 @@ + + void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + { ++ pte_t pte = pfn_pte(pfn, prot); + unsigned long vaddr; + int idx, type; + +@@ -65,7 +66,12 @@ + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); ++ WARN_ON(!pte_none(*(kmap_pte - idx))); ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte - idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -113,6 +119,9 @@ + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + } +diff -Nur linux-4.9.28.orig/arch/x86/mm/pageattr.c linux-4.9.28/arch/x86/mm/pageattr.c +--- linux-4.9.28.orig/arch/x86/mm/pageattr.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/mm/pageattr.c 2017-05-19 03:37:25.134174924 +0200 +@@ -214,7 +214,15 @@ + int in_flags, struct page **pages) + { + unsigned int i, level; ++#ifdef CONFIG_PREEMPT ++ /* ++ * Avoid wbinvd() because it causes latencies on all CPUs, ++ * regardless of any CPU isolation that may be in effect. 
++ */ ++ unsigned long do_wbinvd = 0; ++#else + unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */ ++#endif + + BUG_ON(irqs_disabled()); + +diff -Nur linux-4.9.28.orig/arch/x86/platform/uv/tlb_uv.c linux-4.9.28/arch/x86/platform/uv/tlb_uv.c +--- linux-4.9.28.orig/arch/x86/platform/uv/tlb_uv.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/platform/uv/tlb_uv.c 2017-05-19 03:37:25.134174924 +0200 +@@ -748,9 +748,9 @@ + + quiesce_local_uvhub(hmaster); + +- spin_lock(&hmaster->queue_lock); ++ raw_spin_lock(&hmaster->queue_lock); + reset_with_ipi(&bau_desc->distribution, bcp); +- spin_unlock(&hmaster->queue_lock); ++ raw_spin_unlock(&hmaster->queue_lock); + + end_uvhub_quiesce(hmaster); + +@@ -770,9 +770,9 @@ + + quiesce_local_uvhub(hmaster); + +- spin_lock(&hmaster->queue_lock); ++ raw_spin_lock(&hmaster->queue_lock); + reset_with_ipi(&bau_desc->distribution, bcp); +- spin_unlock(&hmaster->queue_lock); ++ raw_spin_unlock(&hmaster->queue_lock); + + end_uvhub_quiesce(hmaster); + +@@ -793,7 +793,7 @@ + cycles_t tm1; + + hmaster = bcp->uvhub_master; +- spin_lock(&hmaster->disable_lock); ++ raw_spin_lock(&hmaster->disable_lock); + if (!bcp->baudisabled) { + stat->s_bau_disabled++; + tm1 = get_cycles(); +@@ -806,7 +806,7 @@ + } + } + } +- spin_unlock(&hmaster->disable_lock); ++ raw_spin_unlock(&hmaster->disable_lock); + } + + static void count_max_concurr(int stat, struct bau_control *bcp, +@@ -869,7 +869,7 @@ + */ + static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat) + { +- spinlock_t *lock = &hmaster->uvhub_lock; ++ raw_spinlock_t *lock = &hmaster->uvhub_lock; + atomic_t *v; + + v = &hmaster->active_descriptor_count; +@@ -1002,7 +1002,7 @@ + struct bau_control *hmaster; + + hmaster = bcp->uvhub_master; +- spin_lock(&hmaster->disable_lock); ++ raw_spin_lock(&hmaster->disable_lock); + if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { + stat->s_bau_reenabled++; + for_each_present_cpu(tcpu) { +@@ -1014,10 +1014,10 @@ + tbcp->period_giveups = 0; + } + } +- spin_unlock(&hmaster->disable_lock); ++ raw_spin_unlock(&hmaster->disable_lock); + return 0; + } +- spin_unlock(&hmaster->disable_lock); ++ raw_spin_unlock(&hmaster->disable_lock); + return -1; + } + +@@ -1940,9 +1940,9 @@ + bcp->cong_reps = congested_reps; + bcp->disabled_period = sec_2_cycles(disabled_period); + bcp->giveup_limit = giveup_limit; +- spin_lock_init(&bcp->queue_lock); +- spin_lock_init(&bcp->uvhub_lock); +- spin_lock_init(&bcp->disable_lock); ++ raw_spin_lock_init(&bcp->queue_lock); ++ raw_spin_lock_init(&bcp->uvhub_lock); ++ raw_spin_lock_init(&bcp->disable_lock); + } + } + +diff -Nur linux-4.9.28.orig/arch/x86/platform/uv/uv_time.c linux-4.9.28/arch/x86/platform/uv/uv_time.c +--- linux-4.9.28.orig/arch/x86/platform/uv/uv_time.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/arch/x86/platform/uv/uv_time.c 2017-05-19 03:37:25.134174924 +0200 +@@ -57,7 +57,7 @@ + + /* There is one of these allocated per node */ + struct uv_rtc_timer_head { +- spinlock_t lock; ++ raw_spinlock_t lock; + /* next cpu waiting for timer, local node relative: */ + int next_cpu; + /* number of cpus on this node: */ +@@ -177,7 +177,7 @@ + uv_rtc_deallocate_timers(); + return -ENOMEM; + } +- spin_lock_init(&head->lock); ++ raw_spin_lock_init(&head->lock); + head->ncpus = uv_blade_nr_possible_cpus(bid); + head->next_cpu = -1; + blade_info[bid] = head; +@@ -231,7 +231,7 @@ + unsigned long flags; + int next_cpu; + +- spin_lock_irqsave(&head->lock, flags); ++ 
raw_spin_lock_irqsave(&head->lock, flags); + + next_cpu = head->next_cpu; + *t = expires; +@@ -243,12 +243,12 @@ + if (uv_setup_intr(cpu, expires)) { + *t = ULLONG_MAX; + uv_rtc_find_next_timer(head, pnode); +- spin_unlock_irqrestore(&head->lock, flags); ++ raw_spin_unlock_irqrestore(&head->lock, flags); + return -ETIME; + } + } + +- spin_unlock_irqrestore(&head->lock, flags); ++ raw_spin_unlock_irqrestore(&head->lock, flags); + return 0; + } + +@@ -267,7 +267,7 @@ + unsigned long flags; + int rc = 0; + +- spin_lock_irqsave(&head->lock, flags); ++ raw_spin_lock_irqsave(&head->lock, flags); + + if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) + rc = 1; +@@ -279,7 +279,7 @@ + uv_rtc_find_next_timer(head, pnode); + } + +- spin_unlock_irqrestore(&head->lock, flags); ++ raw_spin_unlock_irqrestore(&head->lock, flags); + + return rc; + } +@@ -299,13 +299,18 @@ + static cycle_t uv_read_rtc(struct clocksource *cs) + { + unsigned long offset; ++ cycle_t cycles; + ++ preempt_disable(); + if (uv_get_min_hub_revision_id() == 1) + offset = 0; + else + offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; + +- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); ++ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset); ++ preempt_enable(); ++ ++ return cycles; + } + + /* +diff -Nur linux-4.9.28.orig/block/blk-core.c linux-4.9.28/block/blk-core.c +--- linux-4.9.28.orig/block/blk-core.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/block/blk-core.c 2017-05-19 03:37:25.134174924 +0200 +@@ -125,6 +125,9 @@ + + INIT_LIST_HEAD(&rq->queuelist); + INIT_LIST_HEAD(&rq->timeout_list); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); ++#endif + rq->cpu = -1; + rq->q = q; + rq->__sector = (sector_t) -1; +@@ -233,7 +236,7 @@ + **/ + void blk_start_queue(struct request_queue *q) + { +- WARN_ON(!irqs_disabled()); ++ WARN_ON_NONRT(!irqs_disabled()); + + queue_flag_clear(QUEUE_FLAG_STOPPED, q); + __blk_run_queue(q); +@@ -659,7 +662,7 @@ + if (nowait) + return -EBUSY; + +- ret = wait_event_interruptible(q->mq_freeze_wq, ++ ret = swait_event_interruptible(q->mq_freeze_wq, + !atomic_read(&q->mq_freeze_depth) || + blk_queue_dying(q)); + if (blk_queue_dying(q)) +@@ -679,7 +682,7 @@ + struct request_queue *q = + container_of(ref, struct request_queue, q_usage_counter); + +- wake_up_all(&q->mq_freeze_wq); ++ swake_up_all(&q->mq_freeze_wq); + } + + static void blk_rq_timed_out_timer(unsigned long data) +@@ -748,7 +751,7 @@ + q->bypass_depth = 1; + __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); + +- init_waitqueue_head(&q->mq_freeze_wq); ++ init_swait_queue_head(&q->mq_freeze_wq); + + /* + * Init percpu_ref in atomic mode so that it's faster to shutdown. +@@ -3200,7 +3203,7 @@ + blk_run_queue_async(q); + else + __blk_run_queue(q); +- spin_unlock(q->queue_lock); ++ spin_unlock_irq(q->queue_lock); + } + + static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) +@@ -3248,7 +3251,6 @@ + void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) + { + struct request_queue *q; +- unsigned long flags; + struct request *rq; + LIST_HEAD(list); + unsigned int depth; +@@ -3268,11 +3270,6 @@ + q = NULL; + depth = 0; + +- /* +- * Save and disable interrupts here, to avoid doing it for every +- * queue lock we have to take. 
+- */ +- local_irq_save(flags); + while (!list_empty(&list)) { + rq = list_entry_rq(list.next); + list_del_init(&rq->queuelist); +@@ -3285,7 +3282,7 @@ + queue_unplugged(q, depth, from_schedule); + q = rq->q; + depth = 0; +- spin_lock(q->queue_lock); ++ spin_lock_irq(q->queue_lock); + } + + /* +@@ -3312,8 +3309,6 @@ + */ + if (q) + queue_unplugged(q, depth, from_schedule); +- +- local_irq_restore(flags); + } + + void blk_finish_plug(struct blk_plug *plug) +diff -Nur linux-4.9.28.orig/block/blk-ioc.c linux-4.9.28/block/blk-ioc.c +--- linux-4.9.28.orig/block/blk-ioc.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/block/blk-ioc.c 2017-05-19 03:37:25.134174924 +0200 +@@ -7,6 +7,7 @@ + #include <linux/bio.h> + #include <linux/blkdev.h> + #include <linux/slab.h> ++#include <linux/delay.h> + + #include "blk.h" + +@@ -109,7 +110,7 @@ + spin_unlock(q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + spin_lock_irqsave_nested(&ioc->lock, flags, 1); + } + } +@@ -187,7 +188,7 @@ + spin_unlock(icq->q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + goto retry; + } + } +diff -Nur linux-4.9.28.orig/block/blk-mq.c linux-4.9.28/block/blk-mq.c +--- linux-4.9.28.orig/block/blk-mq.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/block/blk-mq.c 2017-05-19 03:37:25.134174924 +0200 +@@ -72,7 +72,7 @@ + + static void blk_mq_freeze_queue_wait(struct request_queue *q) + { +- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); ++ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); + } + + /* +@@ -110,7 +110,7 @@ + WARN_ON_ONCE(freeze_depth < 0); + if (!freeze_depth) { + percpu_ref_reinit(&q->q_usage_counter); +- wake_up_all(&q->mq_freeze_wq); ++ swake_up_all(&q->mq_freeze_wq); + } + } + EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); +@@ -129,7 +129,7 @@ + * dying, we need to ensure that processes currently waiting on + * the queue are notified as well. 
+ */ +- wake_up_all(&q->mq_freeze_wq); ++ swake_up_all(&q->mq_freeze_wq); + } + + bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) +@@ -177,6 +177,9 @@ + rq->resid_len = 0; + rq->sense = NULL; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); ++#endif + INIT_LIST_HEAD(&rq->timeout_list); + rq->timeout = 0; + +@@ -345,6 +348,17 @@ + } + EXPORT_SYMBOL(blk_mq_end_request); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++void __blk_mq_complete_request_remote_work(struct work_struct *work) ++{ ++ struct request *rq = container_of(work, struct request, work); ++ ++ rq->q->softirq_done_fn(rq); ++} ++ ++#else ++ + static void __blk_mq_complete_request_remote(void *data) + { + struct request *rq = data; +@@ -352,6 +366,8 @@ + rq->q->softirq_done_fn(rq); + } + ++#endif ++ + static void blk_mq_ipi_complete_request(struct request *rq) + { + struct blk_mq_ctx *ctx = rq->mq_ctx; +@@ -363,19 +379,23 @@ + return; + } + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) + shared = cpus_share_cache(cpu, ctx->cpu); + + if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ schedule_work_on(ctx->cpu, &rq->work); ++#else + rq->csd.func = __blk_mq_complete_request_remote; + rq->csd.info = rq; + rq->csd.flags = 0; + smp_call_function_single_async(ctx->cpu, &rq->csd); ++#endif + } else { + rq->q->softirq_done_fn(rq); + } +- put_cpu(); ++ put_cpu_light(); + } + + static void __blk_mq_complete_request(struct request *rq) +@@ -906,14 +926,14 @@ + return; + + if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { +- int cpu = get_cpu(); ++ int cpu = get_cpu_light(); + if (cpumask_test_cpu(cpu, hctx->cpumask)) { + __blk_mq_run_hw_queue(hctx); +- put_cpu(); ++ put_cpu_light(); + return; + } + +- put_cpu(); ++ put_cpu_light(); + } + + kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work); +diff -Nur linux-4.9.28.orig/block/blk-mq.h linux-4.9.28/block/blk-mq.h +--- linux-4.9.28.orig/block/blk-mq.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/block/blk-mq.h 2017-05-19 03:37:25.134174924 +0200 +@@ -72,12 +72,12 @@ + */ + static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) + { +- return __blk_mq_get_ctx(q, get_cpu()); ++ return __blk_mq_get_ctx(q, get_cpu_light()); + } + + static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) + { +- put_cpu(); ++ put_cpu_light(); + } + + struct blk_mq_alloc_data { +diff -Nur linux-4.9.28.orig/block/blk-softirq.c linux-4.9.28/block/blk-softirq.c +--- linux-4.9.28.orig/block/blk-softirq.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/block/blk-softirq.c 2017-05-19 03:37:25.134174924 +0200 +@@ -51,6 +51,7 @@ + raise_softirq_irqoff(BLOCK_SOFTIRQ); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + /* +@@ -89,6 +90,7 @@ + this_cpu_ptr(&blk_cpu_done)); + raise_softirq_irqoff(BLOCK_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + + return 0; + } +@@ -141,6 +143,7 @@ + goto do_local; + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + /** +diff -Nur linux-4.9.28.orig/block/bounce.c linux-4.9.28/block/bounce.c +--- linux-4.9.28.orig/block/bounce.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/block/bounce.c 2017-05-19 03:37:25.134174924 +0200 +@@ -55,11 +55,11 @@ + unsigned long flags; + unsigned char *vto; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + vto = kmap_atomic(to->bv_page); + memcpy(vto + to->bv_offset, vfrom, to->bv_len); + 
kunmap_atomic(vto); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + #else /* CONFIG_HIGHMEM */ +diff -Nur linux-4.9.28.orig/crypto/algapi.c linux-4.9.28/crypto/algapi.c +--- linux-4.9.28.orig/crypto/algapi.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/crypto/algapi.c 2017-05-19 03:37:25.134174924 +0200 +@@ -719,13 +719,13 @@ + + int crypto_register_notifier(struct notifier_block *nb) + { +- return blocking_notifier_chain_register(&crypto_chain, nb); ++ return srcu_notifier_chain_register(&crypto_chain, nb); + } + EXPORT_SYMBOL_GPL(crypto_register_notifier); + + int crypto_unregister_notifier(struct notifier_block *nb) + { +- return blocking_notifier_chain_unregister(&crypto_chain, nb); ++ return srcu_notifier_chain_unregister(&crypto_chain, nb); + } + EXPORT_SYMBOL_GPL(crypto_unregister_notifier); + +diff -Nur linux-4.9.28.orig/crypto/api.c linux-4.9.28/crypto/api.c +--- linux-4.9.28.orig/crypto/api.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/crypto/api.c 2017-05-19 03:37:25.134174924 +0200 +@@ -31,7 +31,7 @@ + DECLARE_RWSEM(crypto_alg_sem); + EXPORT_SYMBOL_GPL(crypto_alg_sem); + +-BLOCKING_NOTIFIER_HEAD(crypto_chain); ++SRCU_NOTIFIER_HEAD(crypto_chain); + EXPORT_SYMBOL_GPL(crypto_chain); + + static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); +@@ -236,10 +236,10 @@ + { + int ok; + +- ok = blocking_notifier_call_chain(&crypto_chain, val, v); ++ ok = srcu_notifier_call_chain(&crypto_chain, val, v); + if (ok == NOTIFY_DONE) { + request_module("cryptomgr"); +- ok = blocking_notifier_call_chain(&crypto_chain, val, v); ++ ok = srcu_notifier_call_chain(&crypto_chain, val, v); + } + + return ok; +diff -Nur linux-4.9.28.orig/crypto/internal.h linux-4.9.28/crypto/internal.h +--- linux-4.9.28.orig/crypto/internal.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/crypto/internal.h 2017-05-19 03:37:25.138175108 +0200 +@@ -47,7 +47,7 @@ + + extern struct list_head crypto_alg_list; + extern struct rw_semaphore crypto_alg_sem; +-extern struct blocking_notifier_head crypto_chain; ++extern struct srcu_notifier_head crypto_chain; + + #ifdef CONFIG_PROC_FS + void __init crypto_init_proc(void); +@@ -146,7 +146,7 @@ + + static inline void crypto_notify(unsigned long val, void *v) + { +- blocking_notifier_call_chain(&crypto_chain, val, v); ++ srcu_notifier_call_chain(&crypto_chain, val, v); + } + + #endif /* _CRYPTO_INTERNAL_H */ +diff -Nur linux-4.9.28.orig/Documentation/sysrq.txt linux-4.9.28/Documentation/sysrq.txt +--- linux-4.9.28.orig/Documentation/sysrq.txt 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/Documentation/sysrq.txt 2017-05-19 03:37:25.118174016 +0200 +@@ -59,10 +59,17 @@ + On other - If you know of the key combos for other architectures, please + let me know so I can add them to this section. + +-On all - write a character to /proc/sysrq-trigger. e.g.: +- ++On all - write a character to /proc/sysrq-trigger, e.g.: + echo t > /proc/sysrq-trigger + ++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g. ++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq ++ Send an ICMP echo request with this pattern plus the particular ++ SysRq command key. Example: ++ # ping -c1 -s57 -p0102030468 ++ will trigger the SysRq-H (help) command. ++ ++ + * What are the 'command' keys? 
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'b'	- Will immediately reboot the system without syncing or unmounting
+diff -Nur linux-4.9.28.orig/Documentation/trace/histograms.txt linux-4.9.28/Documentation/trace/histograms.txt
+--- linux-4.9.28.orig/Documentation/trace/histograms.txt	1970-01-01 01:00:00.000000000 +0100
++++ linux-4.9.28/Documentation/trace/histograms.txt	2017-05-19 03:37:25.118174016 +0200
+@@ -0,0 +1,186 @@
++		Using the Linux Kernel Latency Histograms
++
++
++This document gives a short explanation of how to enable, configure and
++use latency histograms. Latency histograms are primarily relevant in the
++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
++and are used in the quality management of the Linux real-time
++capabilities.
++
++
++* Purpose of latency histograms
++
++A latency histogram continuously accumulates the frequencies of latency
++data. There are two types of histograms:
++- potential sources of latencies
++- effective latencies
++
++
++* Potential sources of latencies
++
++Potential sources of latencies are code segments where interrupts,
++preemption or both are disabled (aka critical sections). To create
++histograms of potential sources of latency, the kernel stores the time
++stamp at the start of a critical section, determines the time elapsed
++when the end of the section is reached, and increments the frequency
++counter of that latency value - irrespective of whether any concurrently
++running process is affected by latency or not.
++- Configuration items (in the Kernel hacking/Tracers submenu)
++  CONFIG_INTERRUPT_OFF_LATENCY
++  CONFIG_PREEMPT_OFF_LATENCY
++
++
++* Effective latencies
++
++Effective latencies are those actually occurring during the wakeup of a
++process. To determine effective latencies, the kernel stores the time
++stamp when a process is scheduled to be woken up, and determines the
++duration of the wakeup time shortly before control is passed over to this
++process. Note that the apparent latency in user space may be somewhat
++longer, since the process may be interrupted after control is passed over
++to it but before the execution in user space takes place. Simply measuring
++the interval between enqueuing and wakeup may also not be appropriate in
++cases where a process is scheduled as a result of a timer expiration. The
++timer may have missed its deadline, e.g. due to disabled interrupts, but
++this latency would not be registered. Therefore, the offsets of missed
++timers are recorded in a separate histogram. If both wakeup latency and
++missed timer offsets are configured and enabled, a third histogram may be
++enabled that records the overall latency as a sum of the timer latency,
++if any, and the wakeup latency. This histogram is called "timerandwakeup".
++- Configuration items (in the Kernel hacking/Tracers submenu)
++  CONFIG_WAKEUP_LATENCY
++  CONFIG_MISSED_TIMER_OFFSETS
++
++
++* Usage
++
++The interface to the administration of the latency histograms is located
++in the debugfs file system. To mount it, either enter
++
++mount -t sysfs nodev /sys
++mount -t debugfs nodev /sys/kernel/debug
++
++from shell command line level, or add
++
++nodev	/sys			sysfs	defaults	0 0
++nodev	/sys/kernel/debug	debugfs	defaults	0 0
++
++to the file /etc/fstab. All latency histogram related files are then
++available in the directory /sys/kernel/debug/tracing/latency_hist. A
++particular histogram type is enabled by writing non-zero to the related
++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
++Select "preemptirqsoff" for the histograms of potential sources of
++latencies and "wakeup" for histograms of effective latencies etc. The
++histogram data - one per CPU - are available in the files
++
++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
++
++The histograms are reset by writing non-zero to the file "reset" in a
++particular latency directory. To reset all latency data, use
++
++#!/bin/sh
++
++TRACINGDIR=/sys/kernel/debug/tracing
++HISTDIR=$TRACINGDIR/latency_hist
++
++if test -d $HISTDIR
++then
++  cd $HISTDIR
++  for i in `find . | grep /reset$`
++  do
++    echo 1 >$i
++  done
++fi
++
++
++* Data format
++
++Latency data are stored with a resolution of one microsecond. The
++maximum latency is 10,240 microseconds. The data are only valid if the
++overflow register is empty. Every output line contains the latency in
++microseconds in the first column and the number of samples in the second
++column. To display only lines with a positive latency count, use, for
++example,
++
++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
++
++#Minimum latency: 0 microseconds.
++#Average latency: 0 microseconds.
++#Maximum latency: 25 microseconds.
++#Total samples: 3104770694
++#There are 0 samples greater or equal than 10240 microseconds
++#usecs           samples
++    0         2984486876
++    1           49843506
++    2           58219047
++    3            5348126
++    4            2187960
++    5            3388262
++    6             959289
++    7             208294
++    8              40420
++    9               4485
++   10              14918
++   11              18340
++   12              25052
++   13              19455
++   14               5602
++   15                969
++   16                 47
++   17                 18
++   18                 14
++   19                  1
++   20                  3
++   21                  2
++   22                  5
++   23                  2
++   25                  1
++
++
++* Wakeup latency of a selected process
++
++To only collect wakeup latency data of a particular process, write the
++PID of the requested process to
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/pid
++
++PIDs are not considered if this variable is set to 0.
++
++
++* Details of the process with the highest wakeup latency so far
++
++Selected data of the process that suffered from the highest wakeup
++latency that occurred in a particular CPU are available in the file
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
++
++In addition, other relevant system data at the time when the
++latency occurred are given.
++
++The format of the data is (all in one line):
++<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
++<- <PID> <Priority> <Command> <Timestamp>
++
++The value of <Timeroffset> is only relevant in the combined timer
++and wakeup latency recording. In the wakeup recording, it is
++always 0; in the missed_timer_offsets recording, it is the same
++as <Latency>.
++
++When retrospectively searching for the origin of a latency when
++tracing was not enabled, it may be helpful to know the name and
++some basic data of the task that (finally) was switching to the
++late real-time task. In addition to the victim's data, the data
++of the possible culprit are therefore also displayed after the
++"<-" symbol.
++
++Finally, the timestamp of the time when the latency occurred
++in <seconds>.<microseconds> after the most recent system boot
++is provided.
++
++These data are also reset when the wakeup histogram is reset.
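[Editor's note: for readers who want to post-process the histograms programmatically rather than with grep, the following stand-alone sketch (not part of the patch) shows the same filtering in C. It assumes only the layout documented above - '#'-prefixed header lines followed by two-column "<usecs> <samples>" data lines - and the preemptoff/CPU0 path is purely illustrative.]

/*
 * lathist.c - print only the non-zero buckets of one latency
 * histogram, equivalent to the `grep -v " 0$"` example above.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned long usecs, samples;

		/* Header lines start with '#'; pass them through. */
		if (line[0] == '#') {
			fputs(line, stdout);
			continue;
		}
		/* Data lines are "<usecs> <samples>"; skip empty buckets. */
		if (sscanf(line, "%lu %lu", &usecs, &samples) == 2 &&
		    samples != 0)
			printf("%6lu %16lu\n", usecs, samples);
	}
	fclose(f);
	return EXIT_SUCCESS;
}

[Compile with "cc -o lathist lathist.c" and run as root so debugfs is readable.]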
+diff -Nur linux-4.9.28.orig/drivers/acpi/acpica/acglobal.h linux-4.9.28/drivers/acpi/acpica/acglobal.h +--- linux-4.9.28.orig/drivers/acpi/acpica/acglobal.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/acpi/acpica/acglobal.h 2017-05-19 03:37:25.138175108 +0200 +@@ -116,7 +116,7 @@ + * interrupt level + */ + ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */ +-ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ ++ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ + ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock); + + /* Mutex for _OSI support */ +diff -Nur linux-4.9.28.orig/drivers/acpi/acpica/hwregs.c linux-4.9.28/drivers/acpi/acpica/hwregs.c +--- linux-4.9.28.orig/drivers/acpi/acpica/hwregs.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/acpi/acpica/hwregs.c 2017-05-19 03:37:25.138175108 +0200 +@@ -363,14 +363,14 @@ + ACPI_BITMASK_ALL_FIXED_STATUS, + ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); + +- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); ++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); + + /* Clear the fixed events in PM1 A/B */ + + status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, + ACPI_BITMASK_ALL_FIXED_STATUS); + +- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); ++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); + + if (ACPI_FAILURE(status)) { + goto exit; +diff -Nur linux-4.9.28.orig/drivers/acpi/acpica/hwxface.c linux-4.9.28/drivers/acpi/acpica/hwxface.c +--- linux-4.9.28.orig/drivers/acpi/acpica/hwxface.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/acpi/acpica/hwxface.c 2017-05-19 03:37:25.138175108 +0200 +@@ -373,7 +373,7 @@ + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + +- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); ++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); + + /* + * At this point, we know that the parent register is one of the +@@ -434,7 +434,7 @@ + + unlock_and_exit: + +- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); ++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); + return_ACPI_STATUS(status); + } + +diff -Nur linux-4.9.28.orig/drivers/acpi/acpica/utmutex.c linux-4.9.28/drivers/acpi/acpica/utmutex.c +--- linux-4.9.28.orig/drivers/acpi/acpica/utmutex.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/acpi/acpica/utmutex.c 2017-05-19 03:37:25.138175108 +0200 +@@ -88,7 +88,7 @@ + return_ACPI_STATUS (status); + } + +- status = acpi_os_create_lock (&acpi_gbl_hardware_lock); ++ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } +@@ -145,7 +145,7 @@ + /* Delete the spinlocks */ + + acpi_os_delete_lock(acpi_gbl_gpe_lock); +- acpi_os_delete_lock(acpi_gbl_hardware_lock); ++ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock); + acpi_os_delete_lock(acpi_gbl_reference_count_lock); + + /* Delete the reader/writer lock */ +diff -Nur linux-4.9.28.orig/drivers/ata/libata-sff.c linux-4.9.28/drivers/ata/libata-sff.c +--- linux-4.9.28.orig/drivers/ata/libata-sff.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/ata/libata-sff.c 2017-05-19 03:37:25.138175108 +0200 +@@ -678,9 +678,9 @@ + unsigned long flags; + unsigned int consumed; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); +- local_irq_restore(flags); ++ 
local_irq_restore_nort(flags); + + return consumed; + } +@@ -719,7 +719,7 @@ + unsigned long flags; + + /* FIXME: use a bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ +@@ -727,7 +727,7 @@ + do_write); + + kunmap_atomic(buf); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, +@@ -864,7 +864,7 @@ + unsigned long flags; + + /* FIXME: use bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ +@@ -872,7 +872,7 @@ + count, rw); + + kunmap_atomic(buf); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, +diff -Nur linux-4.9.28.orig/drivers/block/zram/zcomp.c linux-4.9.28/drivers/block/zram/zcomp.c +--- linux-4.9.28.orig/drivers/block/zram/zcomp.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/block/zram/zcomp.c 2017-05-19 03:37:25.138175108 +0200 +@@ -118,12 +118,19 @@ + + struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) + { +- return *get_cpu_ptr(comp->stream); ++ struct zcomp_strm *zstrm; ++ ++ zstrm = *this_cpu_ptr(comp->stream); ++ spin_lock(&zstrm->zcomp_lock); ++ return zstrm; + } + + void zcomp_stream_put(struct zcomp *comp) + { +- put_cpu_ptr(comp->stream); ++ struct zcomp_strm *zstrm; ++ ++ zstrm = *this_cpu_ptr(comp->stream); ++ spin_unlock(&zstrm->zcomp_lock); + } + + int zcomp_compress(struct zcomp_strm *zstrm, +@@ -174,6 +181,7 @@ + pr_err("Can't allocate a compression stream\n"); + return NOTIFY_BAD; + } ++ spin_lock_init(&zstrm->zcomp_lock); + *per_cpu_ptr(comp->stream, cpu) = zstrm; + break; + case CPU_DEAD: +diff -Nur linux-4.9.28.orig/drivers/block/zram/zcomp.h linux-4.9.28/drivers/block/zram/zcomp.h +--- linux-4.9.28.orig/drivers/block/zram/zcomp.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/block/zram/zcomp.h 2017-05-19 03:37:25.138175108 +0200 +@@ -14,6 +14,7 @@ + /* compression/decompression buffer */ + void *buffer; + struct crypto_comp *tfm; ++ spinlock_t zcomp_lock; + }; + + /* dynamic per-device compression frontend */ +diff -Nur linux-4.9.28.orig/drivers/block/zram/zram_drv.c linux-4.9.28/drivers/block/zram/zram_drv.c +--- linux-4.9.28.orig/drivers/block/zram/zram_drv.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/block/zram/zram_drv.c 2017-05-19 03:37:25.138175108 +0200 +@@ -528,6 +528,8 @@ + goto out_error; + } + ++ zram_meta_init_table_locks(meta, disksize); ++ + return meta; + + out_error: +@@ -575,28 +577,28 @@ + struct zram_meta *meta = zram->meta; + unsigned long handle; + unsigned int size; ++ struct zcomp_strm *zstrm; + +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + handle = meta->table[index].handle; + size = zram_get_obj_size(meta, index); + + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + memset(mem, 0, PAGE_SIZE); + return 0; + } + ++ zstrm = zcomp_stream_get(zram->comp); + cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); + if (size == PAGE_SIZE) { + memcpy(mem, cmem, PAGE_SIZE); + } else { +- struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); +- + ret = zcomp_decompress(zstrm, cmem, size, mem); +- zcomp_stream_put(zram->comp); + } + 
zs_unmap_object(meta->mem_pool, handle); +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zcomp_stream_put(zram->comp); ++ zram_unlock_table(&meta->table[index]); + + /* Should NEVER happen. Return bio error if it does. */ + if (unlikely(ret)) { +@@ -616,14 +618,14 @@ + struct zram_meta *meta = zram->meta; + page = bvec->bv_page; + +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + if (unlikely(!meta->table[index].handle) || + zram_test_flag(meta, index, ZRAM_ZERO)) { +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + handle_zero_page(bvec); + return 0; + } +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + + if (is_partial_io(bvec)) + /* Use a temporary buffer to decompress the page */ +@@ -700,10 +702,10 @@ + if (user_mem) + kunmap_atomic(user_mem); + /* Free memory associated with this sector now. */ +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + zram_free_page(zram, index); + zram_set_flag(meta, index, ZRAM_ZERO); +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + + atomic64_inc(&zram->stats.zero_pages); + ret = 0; +@@ -794,12 +796,12 @@ + * Free memory associated with this sector + * before overwriting unused sectors. + */ +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + zram_free_page(zram, index); + + meta->table[index].handle = handle; + zram_set_obj_size(meta, index, clen); +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + + /* Update stats */ + atomic64_add(clen, &zram->stats.compr_data_size); +@@ -842,9 +844,9 @@ + } + + while (n >= PAGE_SIZE) { +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + zram_free_page(zram, index); +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + atomic64_inc(&zram->stats.notify_free); + index++; + n -= PAGE_SIZE; +@@ -973,9 +975,9 @@ + zram = bdev->bd_disk->private_data; + meta = zram->meta; + +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + zram_free_page(zram, index); +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + atomic64_inc(&zram->stats.notify_free); + } + +diff -Nur linux-4.9.28.orig/drivers/block/zram/zram_drv.h linux-4.9.28/drivers/block/zram/zram_drv.h +--- linux-4.9.28.orig/drivers/block/zram/zram_drv.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/block/zram/zram_drv.h 2017-05-19 03:37:25.138175108 +0200 +@@ -73,6 +73,9 @@ + struct zram_table_entry { + unsigned long handle; + unsigned long value; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t lock; ++#endif + }; + + struct zram_stats { +@@ -120,4 +123,42 @@ + */ + bool claim; /* Protected by bdev->bd_mutex */ + }; ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++static inline void zram_lock_table(struct zram_table_entry *table) ++{ ++ bit_spin_lock(ZRAM_ACCESS, &table->value); ++} ++ ++static inline void zram_unlock_table(struct zram_table_entry *table) ++{ ++ bit_spin_unlock(ZRAM_ACCESS, &table->value); ++} ++ ++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { } ++#else /* CONFIG_PREEMPT_RT_BASE */ ++static inline void zram_lock_table(struct zram_table_entry 
*table) ++{ ++ spin_lock(&table->lock); ++ __set_bit(ZRAM_ACCESS, &table->value); ++} ++ ++static inline void zram_unlock_table(struct zram_table_entry *table) ++{ ++ __clear_bit(ZRAM_ACCESS, &table->value); ++ spin_unlock(&table->lock); ++} ++ ++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) ++{ ++ size_t num_pages = disksize >> PAGE_SHIFT; ++ size_t index; ++ ++ for (index = 0; index < num_pages; index++) { ++ spinlock_t *lock = &meta->table[index].lock; ++ spin_lock_init(lock); ++ } ++} ++#endif /* CONFIG_PREEMPT_RT_BASE */ ++ + #endif +diff -Nur linux-4.9.28.orig/drivers/char/random.c linux-4.9.28/drivers/char/random.c +--- linux-4.9.28.orig/drivers/char/random.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/char/random.c 2017-05-19 03:37:25.138175108 +0200 +@@ -262,6 +262,7 @@ + #include <linux/syscalls.h> + #include <linux/completion.h> + #include <linux/uuid.h> ++#include <linux/locallock.h> + #include <crypto/chacha20.h> + + #include <asm/processor.h> +@@ -1028,8 +1029,6 @@ + } sample; + long delta, delta2, delta3; + +- preempt_disable(); +- + sample.jiffies = jiffies; + sample.cycles = random_get_entropy(); + sample.num = num; +@@ -1070,7 +1069,6 @@ + */ + credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); + } +- preempt_enable(); + } + + void add_input_randomness(unsigned int type, unsigned int code, +@@ -1123,28 +1121,27 @@ + return *(ptr + f->reg_idx++); + } + +-void add_interrupt_randomness(int irq, int irq_flags) ++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) + { + struct entropy_store *r; + struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); +- struct pt_regs *regs = get_irq_regs(); + unsigned long now = jiffies; + cycles_t cycles = random_get_entropy(); + __u32 c_high, j_high; +- __u64 ip; + unsigned long seed; + int credit = 0; + + if (cycles == 0) +- cycles = get_reg(fast_pool, regs); ++ cycles = get_reg(fast_pool, NULL); + c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; + j_high = (sizeof(now) > 4) ? now >> 32 : 0; + fast_pool->pool[0] ^= cycles ^ j_high ^ irq; + fast_pool->pool[1] ^= now ^ c_high; +- ip = regs ? instruction_pointer(regs) : _RET_IP_; ++ if (!ip) ++ ip = _RET_IP_; + fast_pool->pool[2] ^= ip; + fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : +- get_reg(fast_pool, regs); ++ get_reg(fast_pool, NULL); + + fast_mix(fast_pool); + add_interrupt_bench(cycles); +@@ -2056,6 +2053,7 @@ + * goal of being quite fast and not depleting entropy. 
+ */ + static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long); ++static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_long_lock); + unsigned long get_random_long(void) + { + unsigned long ret; +@@ -2064,13 +2062,13 @@ + if (arch_get_random_long(&ret)) + return ret; + +- batch = &get_cpu_var(batched_entropy_long); ++ batch = &get_locked_var(batched_entropy_long_lock, batched_entropy_long); + if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) { + extract_crng((u8 *)batch->entropy_long); + batch->position = 0; + } + ret = batch->entropy_long[batch->position++]; +- put_cpu_var(batched_entropy_long); ++ put_locked_var(batched_entropy_long_lock, batched_entropy_long); + return ret; + } + EXPORT_SYMBOL(get_random_long); +@@ -2082,6 +2080,8 @@ + } + #else + static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int); ++static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_int_lock); ++ + unsigned int get_random_int(void) + { + unsigned int ret; +@@ -2090,13 +2090,13 @@ + if (arch_get_random_int(&ret)) + return ret; + +- batch = &get_cpu_var(batched_entropy_int); ++ batch = &get_locked_var(batched_entropy_int_lock, batched_entropy_int); + if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) { + extract_crng((u8 *)batch->entropy_int); + batch->position = 0; + } + ret = batch->entropy_int[batch->position++]; +- put_cpu_var(batched_entropy_int); ++ put_locked_var(batched_entropy_int_lock, batched_entropy_int); + return ret; + } + #endif +diff -Nur linux-4.9.28.orig/drivers/clocksource/tcb_clksrc.c linux-4.9.28/drivers/clocksource/tcb_clksrc.c +--- linux-4.9.28.orig/drivers/clocksource/tcb_clksrc.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/clocksource/tcb_clksrc.c 2017-05-19 03:37:25.138175108 +0200 +@@ -23,8 +23,7 @@ + * this 32 bit free-running counter. the second channel is not used. + * + * - The third channel may be used to provide a 16-bit clockevent +- * source, used in either periodic or oneshot mode. This runs +- * at 32 KiHZ, and can handle delays of up to two seconds. ++ * source, used in either periodic or oneshot mode. + * + * A boot clocksource and clockevent source are also currently needed, + * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so +@@ -74,6 +73,8 @@ + struct tc_clkevt_device { + struct clock_event_device clkevt; + struct clk *clk; ++ bool clk_enabled; ++ u32 freq; + void __iomem *regs; + }; + +@@ -82,15 +83,26 @@ + return container_of(clkevt, struct tc_clkevt_device, clkevt); + } + +-/* For now, we always use the 32K clock ... this optimizes for NO_HZ, +- * because using one of the divided clocks would usually mean the +- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). +- * +- * A divided clock could be good for high resolution timers, since +- * 30.5 usec resolution can seem "low". 
+- */ + static u32 timer_clock; + ++static void tc_clk_disable(struct clock_event_device *d) ++{ ++ struct tc_clkevt_device *tcd = to_tc_clkevt(d); ++ ++ clk_disable(tcd->clk); ++ tcd->clk_enabled = false; ++} ++ ++static void tc_clk_enable(struct clock_event_device *d) ++{ ++ struct tc_clkevt_device *tcd = to_tc_clkevt(d); ++ ++ if (tcd->clk_enabled) ++ return; ++ clk_enable(tcd->clk); ++ tcd->clk_enabled = true; ++} ++ + static int tc_shutdown(struct clock_event_device *d) + { + struct tc_clkevt_device *tcd = to_tc_clkevt(d); +@@ -98,8 +110,14 @@ + + __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); + __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); ++ return 0; ++} ++ ++static int tc_shutdown_clk_off(struct clock_event_device *d) ++{ ++ tc_shutdown(d); + if (!clockevent_state_detached(d)) +- clk_disable(tcd->clk); ++ tc_clk_disable(d); + + return 0; + } +@@ -112,9 +130,9 @@ + if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) + tc_shutdown(d); + +- clk_enable(tcd->clk); ++ tc_clk_enable(d); + +- /* slow clock, count up to RC, then irq and stop */ ++ /* count up to RC, then irq and stop */ + __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | + ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); + __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +@@ -134,12 +152,12 @@ + /* By not making the gentime core emulate periodic mode on top + * of oneshot, we get lower overhead and improved accuracy. + */ +- clk_enable(tcd->clk); ++ tc_clk_enable(d); + +- /* slow clock, count up to RC, then irq and restart */ ++ /* count up to RC, then irq and restart */ + __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +- __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); ++ __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); + + /* Enable clock and interrupts on RC compare */ + __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +@@ -166,9 +184,13 @@ + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + /* Should be lower than at91rm9200's system timer */ ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + .rating = 125, ++#else ++ .rating = 200, ++#endif + .set_next_event = tc_next_event, +- .set_state_shutdown = tc_shutdown, ++ .set_state_shutdown = tc_shutdown_clk_off, + .set_state_periodic = tc_set_periodic, + .set_state_oneshot = tc_set_oneshot, + }, +@@ -188,8 +210,9 @@ + return IRQ_NONE; + } + +-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) ++static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) + { ++ unsigned divisor = atmel_tc_divisors[divisor_idx]; + int ret; + struct clk *t2_clk = tc->clk[2]; + int irq = tc->irq[2]; +@@ -210,7 +233,11 @@ + clkevt.regs = tc->regs; + clkevt.clk = t2_clk; + +- timer_clock = clk32k_divisor_idx; ++ timer_clock = divisor_idx; ++ if (!divisor) ++ clkevt.freq = 32768; ++ else ++ clkevt.freq = clk_get_rate(t2_clk) / divisor; + + clkevt.clkevt.cpumask = cpumask_of(0); + +@@ -221,7 +248,7 @@ + return ret; + } + +- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); ++ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); + + return ret; + } +@@ -358,7 +385,11 @@ + goto err_disable_t1; + + /* channel 2: periodic and oneshot timer support */ ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + ret = setup_clkevents(tc, clk32k_divisor_idx); ++#else ++ ret = setup_clkevents(tc, best_divisor_idx); ++#endif + if (ret) + goto err_unregister_clksrc; + 
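Note on the pattern: the batched-entropy hunks in drivers/char/random.c above are an instance of the recurring -rt "local lock" conversion, which reappears further down (for example in drivers/connector/cn_proc.c and drivers/gpu/drm/i915/intel_sprite.c): a get_cpu_var()/put_cpu_var() pair, which pins the task by disabling preemption, becomes a named per-CPU lock that stays preemptible on PREEMPT_RT. A minimal sketch of the pattern, using illustrative names rather than anything from the patch:

	#include <linux/locallock.h>
	#include <linux/percpu.h>

	struct my_batch {
		unsigned int position;
	};

	static DEFINE_PER_CPU(struct my_batch, my_batch_pcpu);
	static DEFINE_LOCAL_IRQ_LOCK(my_batch_lock);

	static unsigned int my_batch_next(void)
	{
		struct my_batch *batch;
		unsigned int ret;

		/*
		 * Replaces get_cpu_var(): on a !RT kernel this still boils
		 * down to get_cpu_var(), on RT it takes a per-CPU spinlock
		 * instead and the section stays preemptible.
		 */
		batch = &get_locked_var(my_batch_lock, my_batch_pcpu);
		ret = batch->position++;
		/* Replaces put_cpu_var(). */
		put_locked_var(my_batch_lock, my_batch_pcpu);

		return ret;
	}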
+diff -Nur linux-4.9.28.orig/drivers/clocksource/timer-atmel-pit.c linux-4.9.28/drivers/clocksource/timer-atmel-pit.c +--- linux-4.9.28.orig/drivers/clocksource/timer-atmel-pit.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/clocksource/timer-atmel-pit.c 2017-05-19 03:37:25.138175108 +0200 +@@ -46,6 +46,7 @@ + u32 cycle; + u32 cnt; + unsigned int irq; ++ bool irq_requested; + struct clk *mck; + }; + +@@ -96,15 +97,29 @@ + + /* disable irq, leaving the clocksource active */ + pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN); ++ if (data->irq_requested) { ++ free_irq(data->irq, data); ++ data->irq_requested = false; ++ } + return 0; + } + ++static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id); + /* + * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) + */ + static int pit_clkevt_set_periodic(struct clock_event_device *dev) + { + struct pit_data *data = clkevt_to_pit_data(dev); ++ int ret; ++ ++ ret = request_irq(data->irq, at91sam926x_pit_interrupt, ++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, ++ "at91_tick", data); ++ if (ret) ++ panic(pr_fmt("Unable to setup IRQ\n")); ++ ++ data->irq_requested = true; + + /* update clocksource counter */ + data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); +@@ -230,15 +245,6 @@ + return ret; + } + +- /* Set up irq handler */ +- ret = request_irq(data->irq, at91sam926x_pit_interrupt, +- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, +- "at91_tick", data); +- if (ret) { +- pr_err("Unable to setup IRQ\n"); +- return ret; +- } +- + /* Set up and register clockevents */ + data->clkevt.name = "pit"; + data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC; +diff -Nur linux-4.9.28.orig/drivers/clocksource/timer-atmel-st.c linux-4.9.28/drivers/clocksource/timer-atmel-st.c +--- linux-4.9.28.orig/drivers/clocksource/timer-atmel-st.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/clocksource/timer-atmel-st.c 2017-05-19 03:37:25.138175108 +0200 +@@ -115,18 +115,29 @@ + last_crtr = read_CRTR(); + } + ++static int atmel_st_irq; ++ + static int clkevt32k_shutdown(struct clock_event_device *evt) + { + clkdev32k_disable_and_flush_irq(); + irqmask = 0; + regmap_write(regmap_st, AT91_ST_IER, irqmask); ++ free_irq(atmel_st_irq, regmap_st); + return 0; + } + + static int clkevt32k_set_oneshot(struct clock_event_device *dev) + { ++ int ret; ++ + clkdev32k_disable_and_flush_irq(); + ++ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt, ++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, ++ "at91_tick", regmap_st); ++ if (ret) ++ panic(pr_fmt("Unable to setup IRQ\n")); ++ + /* + * ALM for oneshot irqs, set by next_event() + * before 32 seconds have passed. 
+@@ -139,8 +150,16 @@ + + static int clkevt32k_set_periodic(struct clock_event_device *dev) + { ++ int ret; ++ + clkdev32k_disable_and_flush_irq(); + ++ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt, ++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, ++ "at91_tick", regmap_st); ++ if (ret) ++ panic(pr_fmt("Unable to setup IRQ\n")); ++ + /* PIT for periodic irqs; fixed rate of 1/HZ */ + irqmask = AT91_ST_PITS; + regmap_write(regmap_st, AT91_ST_PIMR, timer_latch); +@@ -198,7 +217,7 @@ + { + struct clk *sclk; + unsigned int sclk_rate, val; +- int irq, ret; ++ int ret; + + regmap_st = syscon_node_to_regmap(node); + if (IS_ERR(regmap_st)) { +@@ -212,21 +231,12 @@ + regmap_read(regmap_st, AT91_ST_SR, &val); + + /* Get the interrupts property */ +- irq = irq_of_parse_and_map(node, 0); +- if (!irq) { ++ atmel_st_irq = irq_of_parse_and_map(node, 0); ++ if (!atmel_st_irq) { + pr_err("Unable to get IRQ from DT\n"); + return -EINVAL; + } + +- /* Make IRQs happen for the system timer */ +- ret = request_irq(irq, at91rm9200_timer_interrupt, +- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, +- "at91_tick", regmap_st); +- if (ret) { +- pr_err("Unable to setup IRQ\n"); +- return ret; +- } +- + sclk = of_clk_get(node, 0); + if (IS_ERR(sclk)) { + pr_err("Unable to get slow clock\n"); +diff -Nur linux-4.9.28.orig/drivers/connector/cn_proc.c linux-4.9.28/drivers/connector/cn_proc.c +--- linux-4.9.28.orig/drivers/connector/cn_proc.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/connector/cn_proc.c 2017-05-19 03:37:25.138175108 +0200 +@@ -32,6 +32,7 @@ + #include <linux/pid_namespace.h> + + #include <linux/cn_proc.h> ++#include <linux/locallock.h> + + /* + * Size of a cn_msg followed by a proc_event structure. Since the +@@ -54,10 +55,11 @@ + + /* proc_event_counts is used as the sequence number of the netlink message */ + static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 }; ++static DEFINE_LOCAL_IRQ_LOCK(send_msg_lock); + + static inline void send_msg(struct cn_msg *msg) + { +- preempt_disable(); ++ local_lock(send_msg_lock); + + msg->seq = __this_cpu_inc_return(proc_event_counts) - 1; + ((struct proc_event *)msg->data)->cpu = smp_processor_id(); +@@ -70,7 +72,7 @@ + */ + cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT); + +- preempt_enable(); ++ local_unlock(send_msg_lock); + } + + void proc_fork_connector(struct task_struct *task) +diff -Nur linux-4.9.28.orig/drivers/cpufreq/Kconfig.x86 linux-4.9.28/drivers/cpufreq/Kconfig.x86 +--- linux-4.9.28.orig/drivers/cpufreq/Kconfig.x86 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/cpufreq/Kconfig.x86 2017-05-19 03:37:25.138175108 +0200 +@@ -124,7 +124,7 @@ + + config X86_POWERNOW_K8 + tristate "AMD Opteron/Athlon64 PowerNow!" +- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ ++ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE + help + This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. + Support for K10 and newer processors is now in acpi-cpufreq. 
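Note on the pattern: both Atmel timer conversions above (the PIT and the rm9200 system timer) share one shape: the tick IRQ is no longer requested once at probe time, but acquired when the clockevent is switched into a working state and freed again on shutdown, so the shared "at91_tick" IRQ is only held while the tick device is actually in use. A condensed sketch of that set-state shape; all names here are illustrative, not from the drivers:

	#include <linux/clockchips.h>
	#include <linux/interrupt.h>

	static irqreturn_t my_tick_handler(int irq, void *dev_id);
	static void my_hw_mask_and_stop(void);
	static void my_hw_program_period(void);

	static unsigned int my_irq;
	static bool my_irq_requested;

	static int my_clkevt_shutdown(struct clock_event_device *dev)
	{
		my_hw_mask_and_stop();		/* quiesce the hardware first */
		if (my_irq_requested) {
			free_irq(my_irq, dev);
			my_irq_requested = false;
		}
		return 0;
	}

	static int my_clkevt_set_periodic(struct clock_event_device *dev)
	{
		int ret;

		ret = request_irq(my_irq, my_tick_handler,
				  IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
				  "my_tick", dev);
		if (ret)
			return ret;
		my_irq_requested = true;

		my_hw_program_period();		/* start periodic ticks */
		return 0;
	}

The sketch returns the request_irq() error; the drivers above panic() instead, which is a deliberate choice, since a system that cannot set up its tick timer cannot run anyway.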
+diff -Nur linux-4.9.28.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.9.28/drivers/gpu/drm/i915/i915_gem_execbuffer.c +--- linux-4.9.28.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2017-05-19 03:37:25.138175108 +0200 +@@ -1489,7 +1489,9 @@ + if (ret) + return ret; + ++#ifndef CONFIG_PREEMPT_RT_BASE + trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); ++#endif + + i915_gem_execbuffer_move_to_active(vmas, params->request); + +diff -Nur linux-4.9.28.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c linux-4.9.28/drivers/gpu/drm/i915/i915_gem_shrinker.c +--- linux-4.9.28.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/gpu/drm/i915/i915_gem_shrinker.c 2017-05-19 03:37:25.138175108 +0200 +@@ -40,7 +40,7 @@ + if (!mutex_is_locked(mutex)) + return false; + +-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) ++#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE) + return mutex->owner == task; + #else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ +diff -Nur linux-4.9.28.orig/drivers/gpu/drm/i915/i915_irq.c linux-4.9.28/drivers/gpu/drm/i915/i915_irq.c +--- linux-4.9.28.orig/drivers/gpu/drm/i915/i915_irq.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/gpu/drm/i915/i915_irq.c 2017-05-19 03:37:25.142175268 +0200 +@@ -812,6 +812,7 @@ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ ++ preempt_disable_rt(); + + /* Get optional system timestamp before query. */ + if (stime) +@@ -863,6 +864,7 @@ + *etime = ktime_get(); + + /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. 
*/ ++ preempt_enable_rt(); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + +diff -Nur linux-4.9.28.orig/drivers/gpu/drm/i915/intel_display.c linux-4.9.28/drivers/gpu/drm/i915/intel_display.c +--- linux-4.9.28.orig/drivers/gpu/drm/i915/intel_display.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/gpu/drm/i915/intel_display.c 2017-05-19 03:37:25.142175268 +0200 +@@ -12131,7 +12131,7 @@ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; + +- WARN_ON(!in_interrupt()); ++ WARN_ON_NONRT(!in_interrupt()); + + if (crtc == NULL) + return; +diff -Nur linux-4.9.28.orig/drivers/gpu/drm/i915/intel_sprite.c linux-4.9.28/drivers/gpu/drm/i915/intel_sprite.c +--- linux-4.9.28.orig/drivers/gpu/drm/i915/intel_sprite.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/gpu/drm/i915/intel_sprite.c 2017-05-19 03:37:25.142175268 +0200 +@@ -35,6 +35,7 @@ + #include <drm/drm_rect.h> + #include <drm/drm_atomic.h> + #include <drm/drm_plane_helper.h> ++#include <linux/locallock.h> + #include "intel_drv.h" + #include "intel_frontbuffer.h" + #include <drm/i915_drm.h> +@@ -65,6 +66,8 @@ + 1000 * adjusted_mode->crtc_htotal); + } + ++static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock); ++ + /** + * intel_pipe_update_start() - start update of a set of display registers + * @crtc: the crtc of which the registers are going to be updated +@@ -95,7 +98,7 @@ + min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100); + max = vblank_start - 1; + +- local_irq_disable(); ++ local_lock_irq(pipe_update_lock); + + if (min <= 0 || max <= 0) + return; +@@ -125,11 +128,11 @@ + break; + } + +- local_irq_enable(); ++ local_unlock_irq(pipe_update_lock); + + timeout = schedule_timeout(timeout); + +- local_irq_disable(); ++ local_lock_irq(pipe_update_lock); + } + + finish_wait(wq, &wait); +@@ -181,7 +184,7 @@ + crtc->base.state->event = NULL; + } + +- local_irq_enable(); ++ local_unlock_irq(pipe_update_lock); + + if (crtc->debug.start_vbl_count && + crtc->debug.start_vbl_count != end_vbl_count) { +diff -Nur linux-4.9.28.orig/drivers/gpu/drm/msm/msm_gem_shrinker.c linux-4.9.28/drivers/gpu/drm/msm/msm_gem_shrinker.c +--- linux-4.9.28.orig/drivers/gpu/drm/msm/msm_gem_shrinker.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/gpu/drm/msm/msm_gem_shrinker.c 2017-05-19 03:37:25.142175268 +0200 +@@ -23,7 +23,7 @@ + if (!mutex_is_locked(mutex)) + return false; + +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) ++#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE) + return mutex->owner == task; + #else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ +diff -Nur linux-4.9.28.orig/drivers/gpu/drm/radeon/radeon_display.c linux-4.9.28/drivers/gpu/drm/radeon/radeon_display.c +--- linux-4.9.28.orig/drivers/gpu/drm/radeon/radeon_display.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/gpu/drm/radeon/radeon_display.c 2017-05-19 03:37:25.142175268 +0200 +@@ -1845,6 +1845,7 @@ + struct radeon_device *rdev = dev->dev_private; + + /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ ++ preempt_disable_rt(); + + /* Get optional system timestamp before query. */ + if (stime) +@@ -1937,6 +1938,7 @@ + *etime = ktime_get(); + + /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ ++ preempt_enable_rt(); + + /* Decode into vertical and horizontal scanout position. 
*/ + *vpos = position & 0x1fff; +diff -Nur linux-4.9.28.orig/drivers/hv/vmbus_drv.c linux-4.9.28/drivers/hv/vmbus_drv.c +--- linux-4.9.28.orig/drivers/hv/vmbus_drv.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/hv/vmbus_drv.c 2017-05-19 03:37:25.142175268 +0200 +@@ -761,6 +761,8 @@ + void *page_addr; + struct hv_message *msg; + union hv_synic_event_flags *event; ++ struct pt_regs *regs = get_irq_regs(); ++ u64 ip = regs ? instruction_pointer(regs) : 0; + bool handled = false; + + page_addr = hv_context.synic_event_page[cpu]; +@@ -808,7 +810,7 @@ + tasklet_schedule(hv_context.msg_dpc[cpu]); + } + +- add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); ++ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip); + } + + +diff -Nur linux-4.9.28.orig/drivers/ide/alim15x3.c linux-4.9.28/drivers/ide/alim15x3.c +--- linux-4.9.28.orig/drivers/ide/alim15x3.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/ide/alim15x3.c 2017-05-19 03:37:25.142175268 +0200 +@@ -234,7 +234,7 @@ + + isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + if (m5229_revision < 0xC2) { + /* +@@ -325,7 +325,7 @@ + } + pci_dev_put(north); + pci_dev_put(isa_dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return 0; + } + +diff -Nur linux-4.9.28.orig/drivers/ide/hpt366.c linux-4.9.28/drivers/ide/hpt366.c +--- linux-4.9.28.orig/drivers/ide/hpt366.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/ide/hpt366.c 2017-05-19 03:37:25.142175268 +0200 +@@ -1236,7 +1236,7 @@ + + dma_old = inb(base + 2); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + dma_new = dma_old; + pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); +@@ -1247,7 +1247,7 @@ + if (dma_new != dma_old) + outb(dma_new, base + 2); + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", + hwif->name, base, base + 7); +diff -Nur linux-4.9.28.orig/drivers/ide/ide-io.c linux-4.9.28/drivers/ide/ide-io.c +--- linux-4.9.28.orig/drivers/ide/ide-io.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/ide/ide-io.c 2017-05-19 03:37:25.146175423 +0200 +@@ -659,7 +659,7 @@ + /* disable_irq_nosync ?? */ + disable_irq(hwif->irq); + /* local CPU only, as if we were handling an interrupt */ +- local_irq_disable(); ++ local_irq_disable_nort(); + if (hwif->polling) { + startstop = handler(drive); + } else if (drive_is_ready(drive)) { +diff -Nur linux-4.9.28.orig/drivers/ide/ide-iops.c linux-4.9.28/drivers/ide/ide-iops.c +--- linux-4.9.28.orig/drivers/ide/ide-iops.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/ide/ide-iops.c 2017-05-19 03:37:25.146175423 +0200 +@@ -129,12 +129,12 @@ + if ((stat & ATA_BUSY) == 0) + break; + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + *rstat = stat; + return -EBUSY; + } + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + /* + * Allow status to settle, then read it again. 
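Note on the pattern: the IDE hunks above (and several below) are mechanical substitutions: each local_irq_save()/local_irq_restore() pair around a polling or PIO section becomes the _nort ("not on RT") variant, which still disables hard interrupts on a mainline kernel but does effectively nothing on PREEMPT_RT_FULL, where the code runs in preemptible thread context anyway. Conceptually the helpers behave like this (a simplified paraphrase, not the exact -rt definitions):

	#ifdef CONFIG_PREEMPT_RT_FULL
	/* RT: leave hard interrupts alone; the section may be preempted. */
	# define local_irq_disable_nort()	do { } while (0)
	# define local_irq_enable_nort()	do { } while (0)
	# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
	# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
	#else
	/* !RT: exactly the IRQ-disabling originals. */
	# define local_irq_disable_nort()	local_irq_disable()
	# define local_irq_enable_nort()	local_irq_enable()
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	#endif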
+diff -Nur linux-4.9.28.orig/drivers/ide/ide-io-std.c linux-4.9.28/drivers/ide/ide-io-std.c +--- linux-4.9.28.orig/drivers/ide/ide-io-std.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/ide/ide-io-std.c 2017-05-19 03:37:25.142175268 +0200 +@@ -175,7 +175,7 @@ + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -186,7 +186,7 @@ + insl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +@@ -219,7 +219,7 @@ + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -230,7 +230,7 @@ + outsl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +diff -Nur linux-4.9.28.orig/drivers/ide/ide-probe.c linux-4.9.28/drivers/ide/ide-probe.c +--- linux-4.9.28.orig/drivers/ide/ide-probe.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/ide/ide-probe.c 2017-05-19 03:37:25.146175423 +0200 +@@ -196,10 +196,10 @@ + int bswap = 1; + + /* local CPU only; some systems need this */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + /* read 512 bytes of id info */ + hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + drive->dev_flags |= IDE_DFLAG_ID_READ; + #ifdef DEBUG +diff -Nur linux-4.9.28.orig/drivers/ide/ide-taskfile.c linux-4.9.28/drivers/ide/ide-taskfile.c +--- linux-4.9.28.orig/drivers/ide/ide-taskfile.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/ide/ide-taskfile.c 2017-05-19 03:37:25.146175423 +0200 +@@ -250,7 +250,7 @@ + + page_is_high = PageHighMem(page); + if (page_is_high) +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + buf = kmap_atomic(page) + offset; + +@@ -271,7 +271,7 @@ + kunmap_atomic(buf); + + if (page_is_high) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + len -= nr_bytes; + } +@@ -414,7 +414,7 @@ + } + + if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) +- local_irq_disable(); ++ local_irq_disable_nort(); + + ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); + +diff -Nur linux-4.9.28.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-4.9.28/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +--- linux-4.9.28.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2017-05-19 03:37:25.146175423 +0200 +@@ -902,7 +902,7 @@ + + ipoib_dbg_mcast(priv, "restarting multicast task\n"); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + netif_addr_lock(dev); + spin_lock(&priv->lock); + +@@ -984,7 +984,7 @@ + + spin_unlock(&priv->lock); + netif_addr_unlock(dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + /* + * make sure the in-flight joins have finished before we attempt +diff -Nur linux-4.9.28.orig/drivers/input/gameport/gameport.c linux-4.9.28/drivers/input/gameport/gameport.c +--- linux-4.9.28.orig/drivers/input/gameport/gameport.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/input/gameport/gameport.c 2017-05-19 03:37:25.146175423 +0200 +@@ -91,13 +91,13 @@ + tx = ~0; + + for (i = 0; i < 50; i++) { +- local_irq_save(flags); ++ 
local_irq_save_nort(flags);
+ t1 = ktime_get_ns();
+ for (t = 0; t < 50; t++)
+ gameport_read(gameport);
+ t2 = ktime_get_ns();
+ t3 = ktime_get_ns();
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ t = (t2 - t1) - (t3 - t2);
+ if (t < tx)
+@@ -124,12 +124,12 @@
+ tx = 1 << 30;
+
+ for(i = 0; i < 50; i++) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ GET_TIME(t1);
+ for (t = 0; t < 50; t++) gameport_read(gameport);
+ GET_TIME(t2);
+ GET_TIME(t3);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
+ }
+@@ -148,11 +148,11 @@
+ tx = 1 << 30;
+
+ for(i = 0; i < 50; i++) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ t1 = rdtsc();
+ for (t = 0; t < 50; t++) gameport_read(gameport);
+ t2 = rdtsc();
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ if (t2 - t1 < tx) tx = t2 - t1;
+ }
+diff -Nur linux-4.9.28.orig/drivers/iommu/amd_iommu.c linux-4.9.28/drivers/iommu/amd_iommu.c
+--- linux-4.9.28.orig/drivers/iommu/amd_iommu.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/drivers/iommu/amd_iommu.c 2017-05-19 03:37:25.146175423 +0200
+@@ -1923,10 +1923,10 @@
+ int ret;
+
+ /*
+- * Must be called with IRQs disabled. Warn here to detect early
+- * when its not.
++ * Must be called with IRQs disabled on a non-RT kernel. Warn here to
++ * detect early when it's not.
+ */
+- WARN_ON(!irqs_disabled());
++ WARN_ON_NONRT(!irqs_disabled());
+
+ /* lock domain */
+ spin_lock(&domain->lock);
+@@ -2094,10 +2094,10 @@
+ struct protection_domain *domain;
+
+ /*
+- * Must be called with IRQs disabled. Warn here to detect early
+- * when its not.
++ * Must be called with IRQs disabled on a non-RT kernel. Warn here to
++ * detect early when it's not.
+ */
+- WARN_ON(!irqs_disabled());
++ WARN_ON_NONRT(!irqs_disabled());
+
+ if (WARN_ON(!dev_data->domain))
+ return;
+diff -Nur linux-4.9.28.orig/drivers/iommu/intel-iommu.c linux-4.9.28/drivers/iommu/intel-iommu.c
+--- linux-4.9.28.orig/drivers/iommu/intel-iommu.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/drivers/iommu/intel-iommu.c 2017-05-19 03:37:25.146175423 +0200
+@@ -479,7 +479,7 @@
+ struct deferred_flush_table *tables;
+ };
+
+-DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
++static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
+
+ /* bitmap for indexing intel_iommus */
+ static int g_num_of_iommus;
+@@ -3716,10 +3716,8 @@
+ struct intel_iommu *iommu;
+ struct deferred_flush_entry *entry;
+ struct deferred_flush_data *flush_data;
+- unsigned int cpuid;
+
+- cpuid = get_cpu();
+- flush_data = per_cpu_ptr(&deferred_flush, cpuid);
++ flush_data = raw_cpu_ptr(&deferred_flush);
+
+ /* Flush all CPUs' entries to avoid deferring too much.
If + * this becomes a bottleneck, can just flush us, and rely on +@@ -3752,8 +3750,6 @@ + } + flush_data->size++; + spin_unlock_irqrestore(&flush_data->lock, flags); +- +- put_cpu(); + } + + static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) +diff -Nur linux-4.9.28.orig/drivers/iommu/iova.c linux-4.9.28/drivers/iommu/iova.c +--- linux-4.9.28.orig/drivers/iommu/iova.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/iommu/iova.c 2017-05-19 03:37:25.146175423 +0200 +@@ -22,6 +22,7 @@ + #include <linux/slab.h> + #include <linux/smp.h> + #include <linux/bitops.h> ++#include <linux/cpu.h> + + static bool iova_rcache_insert(struct iova_domain *iovad, + unsigned long pfn, +@@ -420,10 +421,8 @@ + + /* Try replenishing IOVAs by flushing rcache. */ + flushed_rcache = true; +- preempt_disable(); + for_each_online_cpu(cpu) + free_cpu_cached_iovas(cpu, iovad); +- preempt_enable(); + goto retry; + } + +@@ -751,7 +750,7 @@ + bool can_insert = false; + unsigned long flags; + +- cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); ++ cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches); + spin_lock_irqsave(&cpu_rcache->lock, flags); + + if (!iova_magazine_full(cpu_rcache->loaded)) { +@@ -781,7 +780,6 @@ + iova_magazine_push(cpu_rcache->loaded, iova_pfn); + + spin_unlock_irqrestore(&cpu_rcache->lock, flags); +- put_cpu_ptr(rcache->cpu_rcaches); + + if (mag_to_free) { + iova_magazine_free_pfns(mag_to_free, iovad); +@@ -815,7 +813,7 @@ + bool has_pfn = false; + unsigned long flags; + +- cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); ++ cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches); + spin_lock_irqsave(&cpu_rcache->lock, flags); + + if (!iova_magazine_empty(cpu_rcache->loaded)) { +@@ -837,7 +835,6 @@ + iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn); + + spin_unlock_irqrestore(&cpu_rcache->lock, flags); +- put_cpu_ptr(rcache->cpu_rcaches); + + return iova_pfn; + } +diff -Nur linux-4.9.28.orig/drivers/leds/trigger/Kconfig linux-4.9.28/drivers/leds/trigger/Kconfig +--- linux-4.9.28.orig/drivers/leds/trigger/Kconfig 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/leds/trigger/Kconfig 2017-05-19 03:37:25.146175423 +0200 +@@ -69,7 +69,7 @@ + + config LEDS_TRIGGER_CPU + bool "LED CPU Trigger" +- depends on LEDS_TRIGGERS ++ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE + help + This allows LEDs to be controlled by active CPUs. This shows + the active CPUs across an array of LEDs so you can see which +diff -Nur linux-4.9.28.orig/drivers/md/bcache/Kconfig linux-4.9.28/drivers/md/bcache/Kconfig +--- linux-4.9.28.orig/drivers/md/bcache/Kconfig 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/md/bcache/Kconfig 2017-05-19 03:37:25.146175423 +0200 +@@ -1,6 +1,7 @@ + + config BCACHE + tristate "Block device as cache" ++ depends on !PREEMPT_RT_FULL + ---help--- + Allows a block device to be used as cache for other devices; uses + a btree for indexing and the layout is optimized for SSDs. 
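Note on the pattern: the iova.c hunks above show the complementary conversion to the local-lock one: when the per-CPU structure already carries its own spinlock, a get_cpu_ptr()/put_cpu_ptr() pair (which disables preemption across the whole section) can simply become raw_cpu_ptr(), because the lock, not preemption, is what serializes access. A minimal sketch of that reasoning, with illustrative names; per-CPU spin_lock_init() calls at boot are omitted:

	#include <linux/percpu.h>
	#include <linux/spinlock.h>

	struct my_pcpu_cache {
		spinlock_t lock;
		unsigned int count;
	};

	static DEFINE_PER_CPU(struct my_pcpu_cache, my_cache);

	static void my_cache_add(void)
	{
		struct my_pcpu_cache *c;
		unsigned long flags;

		/*
		 * raw_cpu_ptr() may race with migration and occasionally hand
		 * us another CPU's cache, but that is harmless: the spinlock
		 * serializes all access, so pinning the CPU with
		 * get_cpu_ptr() buys nothing and would forbid preemption.
		 */
		c = raw_cpu_ptr(&my_cache);
		spin_lock_irqsave(&c->lock, flags);
		c->count++;
		spin_unlock_irqrestore(&c->lock, flags);
	}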
+diff -Nur linux-4.9.28.orig/drivers/md/dm-rq.c linux-4.9.28/drivers/md/dm-rq.c +--- linux-4.9.28.orig/drivers/md/dm-rq.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/md/dm-rq.c 2017-05-19 03:37:25.146175423 +0200 +@@ -842,7 +842,7 @@ + /* Establish tio->ti before queuing work (map_tio_request) */ + tio->ti = ti; + kthread_queue_work(&md->kworker, &tio->work); +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + } + } + +diff -Nur linux-4.9.28.orig/drivers/md/raid5.c linux-4.9.28/drivers/md/raid5.c +--- linux-4.9.28.orig/drivers/md/raid5.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/md/raid5.c 2017-05-19 03:37:25.146175423 +0200 +@@ -1928,8 +1928,9 @@ + struct raid5_percpu *percpu; + unsigned long cpu; + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + percpu = per_cpu_ptr(conf->percpu, cpu); ++ spin_lock(&percpu->lock); + if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { + ops_run_biofill(sh); + overlap_clear++; +@@ -1985,7 +1986,8 @@ + if (test_and_clear_bit(R5_Overlap, &dev->flags)) + wake_up(&sh->raid_conf->wait_for_overlap); + } +- put_cpu(); ++ spin_unlock(&percpu->lock); ++ put_cpu_light(); + } + + static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, +@@ -6391,6 +6393,7 @@ + __func__, cpu); + return -ENOMEM; + } ++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); + return 0; + } + +@@ -6401,7 +6404,6 @@ + conf->percpu = alloc_percpu(struct raid5_percpu); + if (!conf->percpu) + return -ENOMEM; +- + err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); + if (!err) { + conf->scribble_disks = max(conf->raid_disks, +diff -Nur linux-4.9.28.orig/drivers/md/raid5.h linux-4.9.28/drivers/md/raid5.h +--- linux-4.9.28.orig/drivers/md/raid5.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/md/raid5.h 2017-05-19 03:37:25.150175574 +0200 +@@ -504,6 +504,7 @@ + int recovery_disabled; + /* per cpu variables */ + struct raid5_percpu { ++ spinlock_t lock; /* Protection for -RT */ + struct page *spare_page; /* Used when checking P/Q in raid6 */ + struct flex_array *scribble; /* space for constructing buffer + * lists and performing address +diff -Nur linux-4.9.28.orig/drivers/misc/Kconfig linux-4.9.28/drivers/misc/Kconfig +--- linux-4.9.28.orig/drivers/misc/Kconfig 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/misc/Kconfig 2017-05-19 03:37:25.150175574 +0200 +@@ -54,6 +54,7 @@ + config ATMEL_TCLIB + bool "Atmel AT32/AT91 Timer/Counter Library" + depends on (AVR32 || ARCH_AT91) ++ default y if PREEMPT_RT_FULL + help + Select this if you want a library to allocate the Timer/Counter + blocks found on many Atmel processors. This facilitates using +@@ -69,8 +70,7 @@ + are combined to make a single 32-bit timer. + + When GENERIC_CLOCKEVENTS is defined, the third timer channel +- may be used as a clock event device supporting oneshot mode +- (delays of up to two seconds) based on the 32 KiHz clock. ++ may be used as a clock event device supporting oneshot mode. + + config ATMEL_TCB_CLKSRC_BLOCK + int +@@ -84,6 +84,15 @@ + TC can be used for other purposes, such as PWM generation and + interval timing. + ++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK ++ bool "TC Block use 32 KiHz clock" ++ depends on ATMEL_TCB_CLKSRC ++ default y if !PREEMPT_RT_FULL ++ help ++ Select this to use 32 KiHz base clock rate as TC block clock ++ source for clock events. 
++ ++ + config DUMMY_IRQ + tristate "Dummy IRQ handler" + default n +diff -Nur linux-4.9.28.orig/drivers/mmc/host/mmci.c linux-4.9.28/drivers/mmc/host/mmci.c +--- linux-4.9.28.orig/drivers/mmc/host/mmci.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/mmc/host/mmci.c 2017-05-19 03:37:25.150175574 +0200 +@@ -1147,15 +1147,12 @@ + struct sg_mapping_iter *sg_miter = &host->sg_miter; + struct variant_data *variant = host->variant; + void __iomem *base = host->base; +- unsigned long flags; + u32 status; + + status = readl(base + MMCISTATUS); + + dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); + +- local_irq_save(flags); +- + do { + unsigned int remain, len; + char *buffer; +@@ -1195,8 +1192,6 @@ + + sg_miter_stop(sg_miter); + +- local_irq_restore(flags); +- + /* + * If we have less than the fifo 'half-full' threshold to transfer, + * trigger a PIO interrupt as soon as any data is available. +diff -Nur linux-4.9.28.orig/drivers/net/ethernet/3com/3c59x.c linux-4.9.28/drivers/net/ethernet/3com/3c59x.c +--- linux-4.9.28.orig/drivers/net/ethernet/3com/3c59x.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/net/ethernet/3com/3c59x.c 2017-05-19 03:37:25.150175574 +0200 +@@ -842,9 +842,9 @@ + { + struct vortex_private *vp = netdev_priv(dev); + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + #endif + +@@ -1910,12 +1910,12 @@ + * Block interrupts because vortex_interrupt does a bare spin_lock() + */ + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (vp->full_bus_master_tx) + boomerang_interrupt(dev->irq, dev); + else + vortex_interrupt(dev->irq, dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + } + +diff -Nur linux-4.9.28.orig/drivers/net/ethernet/realtek/8139too.c linux-4.9.28/drivers/net/ethernet/realtek/8139too.c +--- linux-4.9.28.orig/drivers/net/ethernet/realtek/8139too.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/net/ethernet/realtek/8139too.c 2017-05-19 03:37:25.150175574 +0200 +@@ -2233,7 +2233,7 @@ + struct rtl8139_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + +- disable_irq(irq); ++ disable_irq_nosync(irq); + rtl8139_interrupt(irq, dev); + enable_irq(irq); + } +diff -Nur linux-4.9.28.orig/drivers/net/wireless/intersil/orinoco/orinoco_usb.c linux-4.9.28/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +--- linux-4.9.28.orig/drivers/net/wireless/intersil/orinoco/orinoco_usb.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/net/wireless/intersil/orinoco/orinoco_usb.c 2017-05-19 03:37:25.150175574 +0200 +@@ -697,7 +697,7 @@ + while (!ctx->done.done && msecs--) + udelay(1000); + } else { +- wait_event_interruptible(ctx->done.wait, ++ swait_event_interruptible(ctx->done.wait, + ctx->done.done); + } + break; +diff -Nur linux-4.9.28.orig/drivers/pci/access.c linux-4.9.28/drivers/pci/access.c +--- linux-4.9.28.orig/drivers/pci/access.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/pci/access.c 2017-05-19 03:37:25.150175574 +0200 +@@ -672,7 +672,7 @@ + WARN_ON(!dev->block_cfg_access); + + dev->block_cfg_access = 0; +- wake_up_all(&pci_cfg_wait); ++ wake_up_all_locked(&pci_cfg_wait); + raw_spin_unlock_irqrestore(&pci_lock, flags); + } + EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); +diff -Nur linux-4.9.28.orig/drivers/pinctrl/qcom/pinctrl-msm.c 
linux-4.9.28/drivers/pinctrl/qcom/pinctrl-msm.c +--- linux-4.9.28.orig/drivers/pinctrl/qcom/pinctrl-msm.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/pinctrl/qcom/pinctrl-msm.c 2017-05-19 03:37:25.150175574 +0200 +@@ -61,7 +61,7 @@ + struct notifier_block restart_nb; + int irq; + +- spinlock_t lock; ++ raw_spinlock_t lock; + + DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO); + DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO); +@@ -153,14 +153,14 @@ + if (WARN_ON(i == g->nfuncs)) + return -EINVAL; + +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + + val = readl(pctrl->regs + g->ctl_reg); + val &= ~mask; + val |= i << g->mux_bit; + writel(val, pctrl->regs + g->ctl_reg); + +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return 0; + } +@@ -323,14 +323,14 @@ + break; + case PIN_CONFIG_OUTPUT: + /* set output value */ +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + val = readl(pctrl->regs + g->io_reg); + if (arg) + val |= BIT(g->out_bit); + else + val &= ~BIT(g->out_bit); + writel(val, pctrl->regs + g->io_reg); +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + /* enable output */ + arg = 1; +@@ -351,12 +351,12 @@ + return -EINVAL; + } + +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + val = readl(pctrl->regs + g->ctl_reg); + val &= ~(mask << bit); + val |= arg << bit; + writel(val, pctrl->regs + g->ctl_reg); +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + } + + return 0; +@@ -384,13 +384,13 @@ + + g = &pctrl->soc->groups[offset]; + +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + + val = readl(pctrl->regs + g->ctl_reg); + val &= ~BIT(g->oe_bit); + writel(val, pctrl->regs + g->ctl_reg); + +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return 0; + } +@@ -404,7 +404,7 @@ + + g = &pctrl->soc->groups[offset]; + +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + + val = readl(pctrl->regs + g->io_reg); + if (value) +@@ -417,7 +417,7 @@ + val |= BIT(g->oe_bit); + writel(val, pctrl->regs + g->ctl_reg); + +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return 0; + } +@@ -443,7 +443,7 @@ + + g = &pctrl->soc->groups[offset]; + +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + + val = readl(pctrl->regs + g->io_reg); + if (value) +@@ -452,7 +452,7 @@ + val &= ~BIT(g->out_bit); + writel(val, pctrl->regs + g->io_reg); + +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + } + + #ifdef CONFIG_DEBUG_FS +@@ -571,7 +571,7 @@ + + g = &pctrl->soc->groups[d->hwirq]; + +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + + val = readl(pctrl->regs + g->intr_cfg_reg); + val &= ~BIT(g->intr_enable_bit); +@@ -579,7 +579,7 @@ + + clear_bit(d->hwirq, pctrl->enabled_irqs); + +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + } + + static void msm_gpio_irq_unmask(struct irq_data *d) +@@ -592,7 +592,7 @@ + + g = &pctrl->soc->groups[d->hwirq]; + +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + + val = readl(pctrl->regs + 
g->intr_cfg_reg); + val |= BIT(g->intr_enable_bit); +@@ -600,7 +600,7 @@ + + set_bit(d->hwirq, pctrl->enabled_irqs); + +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + } + + static void msm_gpio_irq_ack(struct irq_data *d) +@@ -613,7 +613,7 @@ + + g = &pctrl->soc->groups[d->hwirq]; + +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + + val = readl(pctrl->regs + g->intr_status_reg); + if (g->intr_ack_high) +@@ -625,7 +625,7 @@ + if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) + msm_gpio_update_dual_edge_pos(pctrl, g, d); + +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + } + + static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) +@@ -638,7 +638,7 @@ + + g = &pctrl->soc->groups[d->hwirq]; + +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + + /* + * For hw without possibility of detecting both edges +@@ -712,7 +712,7 @@ + if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) + msm_gpio_update_dual_edge_pos(pctrl, g, d); + +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) + irq_set_handler_locked(d, handle_level_irq); +@@ -728,11 +728,11 @@ + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + unsigned long flags; + +- spin_lock_irqsave(&pctrl->lock, flags); ++ raw_spin_lock_irqsave(&pctrl->lock, flags); + + irq_set_irq_wake(pctrl->irq, on); + +- spin_unlock_irqrestore(&pctrl->lock, flags); ++ raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return 0; + } +@@ -878,7 +878,7 @@ + pctrl->soc = soc_data; + pctrl->chip = msm_gpio_template; + +- spin_lock_init(&pctrl->lock); ++ raw_spin_lock_init(&pctrl->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pctrl->regs = devm_ioremap_resource(&pdev->dev, res); +diff -Nur linux-4.9.28.orig/drivers/scsi/fcoe/fcoe.c linux-4.9.28/drivers/scsi/fcoe/fcoe.c +--- linux-4.9.28.orig/drivers/scsi/fcoe/fcoe.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/scsi/fcoe/fcoe.c 2017-05-19 03:37:25.150175574 +0200 +@@ -1455,11 +1455,11 @@ + static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) + { + struct fcoe_percpu_s *fps; +- int rc; ++ int rc, cpu = get_cpu_light(); + +- fps = &get_cpu_var(fcoe_percpu); ++ fps = &per_cpu(fcoe_percpu, cpu); + rc = fcoe_get_paged_crc_eof(skb, tlen, fps); +- put_cpu_var(fcoe_percpu); ++ put_cpu_light(); + + return rc; + } +@@ -1646,11 +1646,11 @@ + return 0; + } + +- stats = per_cpu_ptr(lport->stats, get_cpu()); ++ stats = per_cpu_ptr(lport->stats, get_cpu_light()); + stats->InvalidCRCCount++; + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); +- put_cpu(); ++ put_cpu_light(); + return -EINVAL; + } + +@@ -1693,7 +1693,7 @@ + */ + hp = (struct fcoe_hdr *) skb_network_header(skb); + +- stats = per_cpu_ptr(lport->stats, get_cpu()); ++ stats = per_cpu_ptr(lport->stats, get_cpu_light()); + if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { + if (stats->ErrorFrames < 5) + printk(KERN_WARNING "fcoe: FCoE version " +@@ -1725,13 +1725,13 @@ + goto drop; + + if (!fcoe_filter_frames(lport, fp)) { +- put_cpu(); ++ put_cpu_light(); + fc_exch_recv(lport, fp); + return; + } + drop: + stats->ErrorFrames++; +- put_cpu(); ++ put_cpu_light(); + kfree_skb(skb); + } + +diff -Nur linux-4.9.28.orig/drivers/scsi/fcoe/fcoe_ctlr.c 
linux-4.9.28/drivers/scsi/fcoe/fcoe_ctlr.c +--- linux-4.9.28.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/scsi/fcoe/fcoe_ctlr.c 2017-05-19 03:37:25.150175574 +0200 +@@ -834,7 +834,7 @@ + + INIT_LIST_HEAD(&del_list); + +- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); ++ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); + + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; +@@ -870,7 +870,7 @@ + sel_time = fcf->time; + } + } +- put_cpu(); ++ put_cpu_light(); + + list_for_each_entry_safe(fcf, next, &del_list, list) { + /* Removes fcf from current list */ +diff -Nur linux-4.9.28.orig/drivers/scsi/libfc/fc_exch.c linux-4.9.28/drivers/scsi/libfc/fc_exch.c +--- linux-4.9.28.orig/drivers/scsi/libfc/fc_exch.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/scsi/libfc/fc_exch.c 2017-05-19 03:37:25.150175574 +0200 +@@ -814,10 +814,10 @@ + } + memset(ep, 0, sizeof(*ep)); + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); +- put_cpu(); ++ put_cpu_light(); + + /* peek cache of free slot */ + if (pool->left != FC_XID_UNKNOWN) { +diff -Nur linux-4.9.28.orig/drivers/scsi/libsas/sas_ata.c linux-4.9.28/drivers/scsi/libsas/sas_ata.c +--- linux-4.9.28.orig/drivers/scsi/libsas/sas_ata.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/scsi/libsas/sas_ata.c 2017-05-19 03:37:25.150175574 +0200 +@@ -190,7 +190,7 @@ + /* TODO: audit callers to ensure they are ready for qc_issue to + * unconditionally re-enable interrupts + */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + spin_unlock(ap->lock); + + /* If the device fell off, no sense in issuing commands */ +@@ -252,7 +252,7 @@ + + out: + spin_lock(ap->lock); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return ret; + } + +diff -Nur linux-4.9.28.orig/drivers/scsi/qla2xxx/qla_inline.h linux-4.9.28/drivers/scsi/qla2xxx/qla_inline.h +--- linux-4.9.28.orig/drivers/scsi/qla2xxx/qla_inline.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/scsi/qla2xxx/qla_inline.h 2017-05-19 03:37:25.150175574 +0200 +@@ -59,12 +59,12 @@ + { + unsigned long flags; + struct qla_hw_data *ha = rsp->hw; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (IS_P3P_TYPE(ha)) + qla82xx_poll(0, rsp); + else + ha->isp_ops->intr_handler(0, rsp); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + static inline uint8_t * +diff -Nur linux-4.9.28.orig/drivers/scsi/qla2xxx/qla_isr.c linux-4.9.28/drivers/scsi/qla2xxx/qla_isr.c +--- linux-4.9.28.orig/drivers/scsi/qla2xxx/qla_isr.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/scsi/qla2xxx/qla_isr.c 2017-05-19 03:37:25.154175725 +0200 +@@ -3125,7 +3125,11 @@ + * kref_put(). 
+ */
+ kref_get(&qentry->irq_notify.kref);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ swork_queue(&qentry->irq_notify.swork);
++#else
+ schedule_work(&qentry->irq_notify.work);
++#endif
+ }
+
+ /*
+diff -Nur linux-4.9.28.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.9.28/drivers/thermal/x86_pkg_temp_thermal.c
+--- linux-4.9.28.orig/drivers/thermal/x86_pkg_temp_thermal.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/drivers/thermal/x86_pkg_temp_thermal.c 2017-05-19 03:37:25.154175725 +0200
+@@ -29,6 +29,7 @@
+ #include <linux/pm.h>
+ #include <linux/thermal.h>
+ #include <linux/debugfs.h>
++#include <linux/swork.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/mce.h>
+
+@@ -353,7 +354,7 @@
+ }
+ }
+
+-static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
++static void platform_thermal_notify_work(struct swork_event *event)
+ {
+ unsigned long flags;
+ int cpu = smp_processor_id();
+@@ -370,7 +371,7 @@
+ pkg_work_scheduled[phy_id]) {
+ disable_pkg_thres_interrupt();
+ spin_unlock_irqrestore(&pkg_work_lock, flags);
+- return -EINVAL;
++ return;
+ }
+ pkg_work_scheduled[phy_id] = 1;
+ spin_unlock_irqrestore(&pkg_work_lock, flags);
+@@ -379,9 +380,48 @@
+ schedule_delayed_work_on(cpu,
+ &per_cpu(pkg_temp_thermal_threshold_work, cpu),
+ msecs_to_jiffies(notify_delay_ms));
++}
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++static struct swork_event notify_work;
++
++static int thermal_notify_work_init(void)
++{
++ int err;
++
++ err = swork_get();
++ if (err)
++ return err;
++
++ INIT_SWORK(&notify_work, platform_thermal_notify_work);
+ return 0;
+ }
+
++static void thermal_notify_work_cleanup(void)
++{
++ swork_put();
++}
++
++static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
++{
++ swork_queue(&notify_work);
++ return 0;
++}
++
++#else /* !CONFIG_PREEMPT_RT_FULL */
++
++static int thermal_notify_work_init(void) { return 0; }
++
++static void thermal_notify_work_cleanup(void) { }
++
++static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
++{
++ platform_thermal_notify_work(NULL);
++
++ return 0;
++}
++#endif /* CONFIG_PREEMPT_RT_FULL */
++
+ static int find_siblings_cpu(int cpu)
+ {
+ int i;
+@@ -585,6 +625,9 @@
+ if (!x86_match_cpu(pkg_temp_thermal_ids))
+ return -ENODEV;
+
++ if (thermal_notify_work_init())
++ return -ENODEV;
++
+ spin_lock_init(&pkg_work_lock);
+ platform_thermal_package_notify =
+ pkg_temp_thermal_platform_thermal_notify;
+@@ -609,7 +652,7 @@
+ kfree(pkg_work_scheduled);
+ platform_thermal_package_notify = NULL;
+ platform_thermal_package_rate_control = NULL;
+-
++ thermal_notify_work_cleanup();
+ return -ENODEV;
+ }
+
+@@ -634,6 +677,7 @@
+ mutex_unlock(&phy_dev_list_mutex);
+ platform_thermal_package_notify = NULL;
+ platform_thermal_package_rate_control = NULL;
++ thermal_notify_work_cleanup();
+ for_each_online_cpu(i)
+ cancel_delayed_work_sync(
+ &per_cpu(pkg_temp_thermal_threshold_work, i));
+diff -Nur linux-4.9.28.orig/drivers/tty/serial/8250/8250_core.c linux-4.9.28/drivers/tty/serial/8250/8250_core.c
+--- linux-4.9.28.orig/drivers/tty/serial/8250/8250_core.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/drivers/tty/serial/8250/8250_core.c 2017-05-19 03:37:25.154175725 +0200
+@@ -58,7 +58,16 @@
+
+ static unsigned int skip_txen_test; /* force skip of txen test at init time */
+
+-#define PASS_LIMIT 512
++/*
++ * On -rt we can have more delays, and legitimately
++ * so; don't drop work spuriously and spam the
++ * syslog:
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define PASS_LIMIT 1000000
++#else
++# define PASS_LIMIT 512
++#endif
+
+ #include <asm/serial.h>
+ /*
+diff -Nur linux-4.9.28.orig/drivers/tty/serial/8250/8250_port.c linux-4.9.28/drivers/tty/serial/8250/8250_port.c
+--- linux-4.9.28.orig/drivers/tty/serial/8250/8250_port.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/drivers/tty/serial/8250/8250_port.c 2017-05-19 03:37:25.154175725 +0200
+@@ -35,6 +35,7 @@
+ #include <linux/nmi.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/kdb.h>
+ #include <linux/uaccess.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/timer.h>
+@@ -3144,9 +3145,9 @@
+
+ serial8250_rpm_get(up);
+
+- if (port->sysrq)
++ if (port->sysrq || oops_in_progress)
+ locked = 0;
+- else if (oops_in_progress)
++ else if (in_kdb_printk())
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+diff -Nur linux-4.9.28.orig/drivers/tty/serial/amba-pl011.c linux-4.9.28/drivers/tty/serial/amba-pl011.c
+--- linux-4.9.28.orig/drivers/tty/serial/amba-pl011.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/drivers/tty/serial/amba-pl011.c 2017-05-19 03:37:25.154175725 +0200
+@@ -2194,13 +2194,19 @@
+
+ clk_enable(uap->clk);
+
+- local_irq_save(flags);
++ /*
++ * local_irq_save(flags);
++ *
++ * This local_irq_save() is nonsense. If we come in via sysrq
++ * handling, then interrupts are already disabled. Aside from
++ * that, the port.sysrq check is racy on SMP regardless.
++ */
+ if (uap->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&uap->port.lock);
++ locked = spin_trylock_irqsave(&uap->port.lock, flags);
+ else
+- spin_lock(&uap->port.lock);
++ spin_lock_irqsave(&uap->port.lock, flags);
+
+ /*
+ * First save the CR then disable the interrupts
+@@ -2224,8 +2230,7 @@
+ pl011_write(old_cr, uap, REG_CR);
+
+ if (locked)
+- spin_unlock(&uap->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&uap->port.lock, flags);
+
+ clk_disable(uap->clk);
+ }
+diff -Nur linux-4.9.28.orig/drivers/tty/serial/omap-serial.c linux-4.9.28/drivers/tty/serial/omap-serial.c
+--- linux-4.9.28.orig/drivers/tty/serial/omap-serial.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/drivers/tty/serial/omap-serial.c 2017-05-19 03:37:25.154175725 +0200
+@@ -1257,13 +1257,10 @@
+
+ pm_runtime_get_sync(up->dev);
+
+- local_irq_save(flags);
+- if (up->port.sysrq)
+- locked = 0;
+- else if (oops_in_progress)
+- locked = spin_trylock(&up->port.lock);
++ if (up->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+- spin_lock(&up->port.lock);
++ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -1292,8 +1289,7 @@
+ pm_runtime_mark_last_busy(up->dev);
+ pm_runtime_put_autosuspend(up->dev);
+ if (locked)
+- spin_unlock(&up->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+
+ static int __init
+diff -Nur linux-4.9.28.orig/drivers/usb/core/hcd.c linux-4.9.28/drivers/usb/core/hcd.c
+--- linux-4.9.28.orig/drivers/usb/core/hcd.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/drivers/usb/core/hcd.c 2017-05-19 03:37:25.154175725 +0200
+@@ -1764,9 +1764,9 @@
+ * and no one may trigger the above deadlock situation when
+ * running complete() in tasklet.
+ */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + urb->complete(urb); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + usb_anchor_resume_wakeups(anchor); + atomic_dec(&urb->use_count); +diff -Nur linux-4.9.28.orig/drivers/usb/gadget/function/f_fs.c linux-4.9.28/drivers/usb/gadget/function/f_fs.c +--- linux-4.9.28.orig/drivers/usb/gadget/function/f_fs.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/usb/gadget/function/f_fs.c 2017-05-19 03:37:25.154175725 +0200 +@@ -1593,7 +1593,7 @@ + pr_info("%s(): freeing\n", __func__); + ffs_data_clear(ffs); + BUG_ON(waitqueue_active(&ffs->ev.waitq) || +- waitqueue_active(&ffs->ep0req_completion.wait)); ++ swait_active(&ffs->ep0req_completion.wait)); + kfree(ffs->dev_name); + kfree(ffs); + } +diff -Nur linux-4.9.28.orig/drivers/usb/gadget/legacy/inode.c linux-4.9.28/drivers/usb/gadget/legacy/inode.c +--- linux-4.9.28.orig/drivers/usb/gadget/legacy/inode.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/drivers/usb/gadget/legacy/inode.c 2017-05-19 03:37:25.154175725 +0200 +@@ -346,7 +346,7 @@ + spin_unlock_irq (&epdata->dev->lock); + + if (likely (value == 0)) { +- value = wait_event_interruptible (done.wait, done.done); ++ value = swait_event_interruptible (done.wait, done.done); + if (value != 0) { + spin_lock_irq (&epdata->dev->lock); + if (likely (epdata->ep != NULL)) { +@@ -355,7 +355,7 @@ + usb_ep_dequeue (epdata->ep, epdata->req); + spin_unlock_irq (&epdata->dev->lock); + +- wait_event (done.wait, done.done); ++ swait_event (done.wait, done.done); + if (epdata->status == -ECONNRESET) + epdata->status = -EINTR; + } else { +diff -Nur linux-4.9.28.orig/fs/aio.c linux-4.9.28/fs/aio.c +--- linux-4.9.28.orig/fs/aio.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/aio.c 2017-05-19 03:37:25.154175725 +0200 +@@ -40,6 +40,7 @@ + #include <linux/ramfs.h> + #include <linux/percpu-refcount.h> + #include <linux/mount.h> ++#include <linux/swork.h> + + #include <asm/kmap_types.h> + #include <asm/uaccess.h> +@@ -115,7 +116,7 @@ + struct page **ring_pages; + long nr_pages; + +- struct work_struct free_work; ++ struct swork_event free_work; + + /* + * signals when all in-flight requests are done +@@ -258,6 +259,7 @@ + .mount = aio_mount, + .kill_sb = kill_anon_super, + }; ++ BUG_ON(swork_get()); + aio_mnt = kern_mount(&aio_fs); + if (IS_ERR(aio_mnt)) + panic("Failed to create aio fs mount."); +@@ -581,9 +583,9 @@ + return cancel(&kiocb->common); + } + +-static void free_ioctx(struct work_struct *work) ++static void free_ioctx(struct swork_event *sev) + { +- struct kioctx *ctx = container_of(work, struct kioctx, free_work); ++ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); + + pr_debug("freeing %p\n", ctx); + +@@ -602,8 +604,8 @@ + if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) + complete(&ctx->rq_wait->comp); + +- INIT_WORK(&ctx->free_work, free_ioctx); +- schedule_work(&ctx->free_work); ++ INIT_SWORK(&ctx->free_work, free_ioctx); ++ swork_queue(&ctx->free_work); + } + + /* +@@ -611,9 +613,9 @@ + * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - + * now it's safe to cancel any that need to be. 
+ */ +-static void free_ioctx_users(struct percpu_ref *ref) ++static void free_ioctx_users_work(struct swork_event *sev) + { +- struct kioctx *ctx = container_of(ref, struct kioctx, users); ++ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); + struct aio_kiocb *req; + + spin_lock_irq(&ctx->ctx_lock); +@@ -632,6 +634,14 @@ + percpu_ref_put(&ctx->reqs); + } + ++static void free_ioctx_users(struct percpu_ref *ref) ++{ ++ struct kioctx *ctx = container_of(ref, struct kioctx, users); ++ ++ INIT_SWORK(&ctx->free_work, free_ioctx_users_work); ++ swork_queue(&ctx->free_work); ++} ++ + static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) + { + unsigned i, new_nr; +diff -Nur linux-4.9.28.orig/fs/autofs4/autofs_i.h linux-4.9.28/fs/autofs4/autofs_i.h +--- linux-4.9.28.orig/fs/autofs4/autofs_i.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/autofs4/autofs_i.h 2017-05-19 03:37:25.154175725 +0200 +@@ -31,6 +31,7 @@ + #include <linux/sched.h> + #include <linux/mount.h> + #include <linux/namei.h> ++#include <linux/delay.h> + #include <asm/current.h> + #include <linux/uaccess.h> + +diff -Nur linux-4.9.28.orig/fs/autofs4/expire.c linux-4.9.28/fs/autofs4/expire.c +--- linux-4.9.28.orig/fs/autofs4/expire.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/autofs4/expire.c 2017-05-19 03:37:25.154175725 +0200 +@@ -148,7 +148,7 @@ + parent = p->d_parent; + if (!spin_trylock(&parent->d_lock)) { + spin_unlock(&p->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto relock; + } + spin_unlock(&p->d_lock); +diff -Nur linux-4.9.28.orig/fs/buffer.c linux-4.9.28/fs/buffer.c +--- linux-4.9.28.orig/fs/buffer.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/buffer.c 2017-05-19 03:37:25.158175874 +0200 +@@ -301,8 +301,7 @@ + * decide that the page is now completely done. 
+ */ + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -315,8 +314,7 @@ + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + + /* + * If none of the buffers had errors and they are all +@@ -328,9 +326,7 @@ + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /* +@@ -358,8 +354,7 @@ + } + + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + + clear_buffer_async_write(bh); + unlock_buffer(bh); +@@ -371,15 +366,12 @@ + } + tmp = tmp->b_this_page; + } +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + end_page_writeback(page); + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + EXPORT_SYMBOL(end_buffer_async_write); + +@@ -3383,6 +3375,7 @@ + struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); + if (ret) { + INIT_LIST_HEAD(&ret->b_assoc_buffers); ++ buffer_head_init_locks(ret); + preempt_disable(); + __this_cpu_inc(bh_accounting.nr); + recalc_bh_state(); +diff -Nur linux-4.9.28.orig/fs/cifs/readdir.c linux-4.9.28/fs/cifs/readdir.c +--- linux-4.9.28.orig/fs/cifs/readdir.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/cifs/readdir.c 2017-05-19 03:37:25.158175874 +0200 +@@ -80,7 +80,7 @@ + struct inode *inode; + struct super_block *sb = parent->d_sb; + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + + cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); + +diff -Nur linux-4.9.28.orig/fs/dcache.c linux-4.9.28/fs/dcache.c +--- linux-4.9.28.orig/fs/dcache.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/dcache.c 2017-05-19 03:37:25.158175874 +0200 +@@ -19,6 +19,7 @@ + #include <linux/mm.h> + #include <linux/fs.h> + #include <linux/fsnotify.h> ++#include <linux/delay.h> + #include <linux/slab.h> + #include <linux/init.h> + #include <linux/hash.h> +@@ -750,6 +751,8 @@ + */ + void dput(struct dentry *dentry) + { ++ struct dentry *parent; ++ + if (unlikely(!dentry)) + return; + +@@ -788,9 +791,18 @@ + return; + + kill_it: +- dentry = dentry_kill(dentry); +- if (dentry) { +- cond_resched(); ++ parent = dentry_kill(dentry); ++ if (parent) { ++ int r; ++ ++ if (parent == dentry) { ++ /* the task with the highest priority won't schedule */ ++ r = cond_resched(); ++ if (!r) ++ cpu_chill(); ++ } else { ++ dentry = parent; ++ } + goto repeat; + } + } +@@ -2324,7 +2336,7 @@ + if (dentry->d_lockref.count == 1) { + if (!spin_trylock(&inode->i_lock)) { + spin_unlock(&dentry->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto again; + } + dentry->d_flags &= ~DCACHE_CANT_MOUNT; +@@ -2384,21 +2396,24 @@ + + static void d_wait_lookup(struct dentry *dentry) + { +- if (d_in_lookup(dentry)) { +- DECLARE_WAITQUEUE(wait, current); +- add_wait_queue(dentry->d_wait, &wait); +- do { +- set_current_state(TASK_UNINTERRUPTIBLE); +- spin_unlock(&dentry->d_lock); +- schedule(); +- 
spin_lock(&dentry->d_lock); +- } while (d_in_lookup(dentry)); +- } ++ struct swait_queue __wait; ++ ++ if (!d_in_lookup(dentry)) ++ return; ++ ++ INIT_LIST_HEAD(&__wait.task_list); ++ do { ++ prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE); ++ spin_unlock(&dentry->d_lock); ++ schedule(); ++ spin_lock(&dentry->d_lock); ++ } while (d_in_lookup(dentry)); ++ finish_swait(dentry->d_wait, &__wait); + } + + struct dentry *d_alloc_parallel(struct dentry *parent, + const struct qstr *name, +- wait_queue_head_t *wq) ++ struct swait_queue_head *wq) + { + unsigned int hash = name->hash; + struct hlist_bl_head *b = in_lookup_hash(parent, hash); +@@ -2507,7 +2522,7 @@ + hlist_bl_lock(b); + dentry->d_flags &= ~DCACHE_PAR_LOOKUP; + __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); +- wake_up_all(dentry->d_wait); ++ swake_up_all(dentry->d_wait); + dentry->d_wait = NULL; + hlist_bl_unlock(b); + INIT_HLIST_NODE(&dentry->d_u.d_alias); +@@ -3604,6 +3619,11 @@ + + void __init vfs_caches_init_early(void) + { ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++) ++ INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]); ++ + dcache_init_early(); + inode_init_early(); + } +diff -Nur linux-4.9.28.orig/fs/eventpoll.c linux-4.9.28/fs/eventpoll.c +--- linux-4.9.28.orig/fs/eventpoll.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/eventpoll.c 2017-05-19 03:37:25.158175874 +0200 +@@ -510,12 +510,12 @@ + */ + static void ep_poll_safewake(wait_queue_head_t *wq) + { +- int this_cpu = get_cpu(); ++ int this_cpu = get_cpu_light(); + + ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, + ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); + +- put_cpu(); ++ put_cpu_light(); + } + + static void ep_remove_wait_queue(struct eppoll_entry *pwq) +diff -Nur linux-4.9.28.orig/fs/exec.c linux-4.9.28/fs/exec.c +--- linux-4.9.28.orig/fs/exec.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/exec.c 2017-05-19 03:37:25.158175874 +0200 +@@ -1017,12 +1017,14 @@ + } + } + task_lock(tsk); ++ preempt_disable_rt(); + active_mm = tsk->active_mm; + tsk->mm = mm; + tsk->active_mm = mm; + activate_mm(active_mm, mm); + tsk->mm->vmacache_seqnum = 0; + vmacache_flush(tsk); ++ preempt_enable_rt(); + task_unlock(tsk); + if (old_mm) { + up_read(&old_mm->mmap_sem); +diff -Nur linux-4.9.28.orig/fs/fuse/dir.c linux-4.9.28/fs/fuse/dir.c +--- linux-4.9.28.orig/fs/fuse/dir.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/fuse/dir.c 2017-05-19 03:37:25.158175874 +0200 +@@ -1191,7 +1191,7 @@ + struct inode *dir = d_inode(parent); + struct fuse_conn *fc; + struct inode *inode; +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + + if (!o->nodeid) { + /* +diff -Nur linux-4.9.28.orig/fs/jbd2/checkpoint.c linux-4.9.28/fs/jbd2/checkpoint.c +--- linux-4.9.28.orig/fs/jbd2/checkpoint.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/jbd2/checkpoint.c 2017-05-19 03:37:25.158175874 +0200 +@@ -116,6 +116,8 @@ + nblocks = jbd2_space_needed(journal); + while (jbd2_log_space_left(journal) < nblocks) { + write_unlock(&journal->j_state_lock); ++ if (current->plug) ++ io_schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + + /* +diff -Nur linux-4.9.28.orig/fs/locks.c linux-4.9.28/fs/locks.c +--- linux-4.9.28.orig/fs/locks.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/locks.c 2017-05-19 03:37:25.158175874 +0200 +@@ -935,7 +935,7 @@ + return -ENOMEM; + } + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + if 
(request->fl_flags & FL_ACCESS) + goto find_conflict; +@@ -976,7 +976,7 @@ + + out: + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + if (new_fl) + locks_free_lock(new_fl); + locks_dispose_list(&dispose); +@@ -1013,7 +1013,7 @@ + new_fl2 = locks_alloc_lock(); + } + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + /* + * New lock request. Walk all POSIX locks and look for conflicts. If +@@ -1185,7 +1185,7 @@ + } + out: + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + /* + * Free any unused locks. + */ +@@ -1460,7 +1460,7 @@ + return error; + } + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + + time_out_leases(inode, &dispose); +@@ -1512,13 +1512,13 @@ + locks_insert_block(fl, new_fl); + trace_break_lease_block(inode, new_fl); + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + + locks_dispose_list(&dispose); + error = wait_event_interruptible_timeout(new_fl->fl_wait, + !new_fl->fl_next, break_time); + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + trace_break_lease_unblock(inode, new_fl); + locks_delete_block(new_fl); +@@ -1535,7 +1535,7 @@ + } + out: + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + locks_dispose_list(&dispose); + locks_free_lock(new_fl); + return error; +@@ -1609,7 +1609,7 @@ + + ctx = smp_load_acquire(&inode->i_flctx); + if (ctx && !list_empty_careful(&ctx->flc_lease)) { +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + time_out_leases(inode, &dispose); + list_for_each_entry(fl, &ctx->flc_lease, fl_list) { +@@ -1619,7 +1619,7 @@ + break; + } + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + + locks_dispose_list(&dispose); + } +@@ -1694,7 +1694,7 @@ + return -EINVAL; + } + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + time_out_leases(inode, &dispose); + error = check_conflicting_open(dentry, arg, lease->fl_flags); +@@ -1765,7 +1765,7 @@ + lease->fl_lmops->lm_setup(lease, priv); + out: + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + locks_dispose_list(&dispose); + if (is_deleg) + inode_unlock(inode); +@@ -1788,7 +1788,7 @@ + return error; + } + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + list_for_each_entry(fl, &ctx->flc_lease, fl_list) { + if (fl->fl_file == filp && +@@ -1801,7 +1801,7 @@ + if (victim) + error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); ++ percpu_up_read(&file_rwsem); + locks_dispose_list(&dispose); + return error; + } +@@ -2532,13 +2532,13 @@ + if (list_empty(&ctx->flc_lease)) + return; + +- percpu_down_read_preempt_disable(&file_rwsem); ++ percpu_down_read(&file_rwsem); + spin_lock(&ctx->flc_lock); + list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) + if (filp == fl->fl_file) + lease_modify(fl, F_UNLCK, &dispose); + spin_unlock(&ctx->flc_lock); +- percpu_up_read_preempt_enable(&file_rwsem); 
++ percpu_up_read(&file_rwsem); + + locks_dispose_list(&dispose); + } +diff -Nur linux-4.9.28.orig/fs/namei.c linux-4.9.28/fs/namei.c +--- linux-4.9.28.orig/fs/namei.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/namei.c 2017-05-19 03:37:25.158175874 +0200 +@@ -1626,7 +1626,7 @@ + { + struct dentry *dentry = ERR_PTR(-ENOENT), *old; + struct inode *inode = dir->d_inode; +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + + inode_lock_shared(inode); + /* Don't go there if it's already dead */ +@@ -3083,7 +3083,7 @@ + struct dentry *dentry; + int error, create_error = 0; + umode_t mode = op->mode; +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + + if (unlikely(IS_DEADDIR(dir_inode))) + return -ENOENT; +diff -Nur linux-4.9.28.orig/fs/namespace.c linux-4.9.28/fs/namespace.c +--- linux-4.9.28.orig/fs/namespace.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/namespace.c 2017-05-19 03:37:25.158175874 +0200 +@@ -14,6 +14,7 @@ + #include <linux/mnt_namespace.h> + #include <linux/user_namespace.h> + #include <linux/namei.h> ++#include <linux/delay.h> + #include <linux/security.h> + #include <linux/idr.h> + #include <linux/init.h> /* init_rootfs */ +@@ -356,8 +357,11 @@ + * incremented count after it has set MNT_WRITE_HOLD. + */ + smp_mb(); +- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) +- cpu_relax(); ++ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { ++ preempt_enable(); ++ cpu_chill(); ++ preempt_disable(); ++ } + /* + * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will + * be set to match its requirements. So we must not load that until +diff -Nur linux-4.9.28.orig/fs/nfs/delegation.c linux-4.9.28/fs/nfs/delegation.c +--- linux-4.9.28.orig/fs/nfs/delegation.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/nfs/delegation.c 2017-05-19 03:37:25.158175874 +0200 +@@ -150,11 +150,11 @@ + sp = state->owner; + /* Block nfs4_proc_unlck */ + mutex_lock(&sp->so_delegreturn_mutex); +- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); ++ seq = read_seqbegin(&sp->so_reclaim_seqlock); + err = nfs4_open_delegation_recall(ctx, state, stateid, type); + if (!err) + err = nfs_delegation_claim_locks(ctx, state, stateid); +- if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) ++ if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq)) + err = -EAGAIN; + mutex_unlock(&sp->so_delegreturn_mutex); + put_nfs_open_context(ctx); +diff -Nur linux-4.9.28.orig/fs/nfs/dir.c linux-4.9.28/fs/nfs/dir.c +--- linux-4.9.28.orig/fs/nfs/dir.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/nfs/dir.c 2017-05-19 03:37:25.162176025 +0200 +@@ -485,7 +485,7 @@ + void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) + { + struct qstr filename = QSTR_INIT(entry->name, entry->len); +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + struct dentry *dentry; + struct dentry *alias; + struct inode *dir = d_inode(parent); +@@ -1487,7 +1487,7 @@ + struct file *file, unsigned open_flags, + umode_t mode, int *opened) + { +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + struct nfs_open_context *ctx; + struct dentry *res; + struct iattr attr = { .ia_valid = ATTR_OPEN }; +@@ -1802,7 +1802,11 @@ + + trace_nfs_rmdir_enter(dir, dentry); + if (d_really_is_positive(dentry)) { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ down(&NFS_I(d_inode(dentry))->rmdir_sem); ++#else + down_write(&NFS_I(d_inode(dentry))->rmdir_sem); ++#endif + error = 
NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); + /* Ensure the VFS deletes this inode */ + switch (error) { +@@ -1812,7 +1816,11 @@ + case -ENOENT: + nfs_dentry_handle_enoent(dentry); + } ++#ifdef CONFIG_PREEMPT_RT_BASE ++ up(&NFS_I(d_inode(dentry))->rmdir_sem); ++#else + up_write(&NFS_I(d_inode(dentry))->rmdir_sem); ++#endif + } else + error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); + trace_nfs_rmdir_exit(dir, dentry, error); +diff -Nur linux-4.9.28.orig/fs/nfs/inode.c linux-4.9.28/fs/nfs/inode.c +--- linux-4.9.28.orig/fs/nfs/inode.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/nfs/inode.c 2017-05-19 03:37:25.162176025 +0200 +@@ -1957,7 +1957,11 @@ + nfsi->nrequests = 0; + nfsi->commit_info.ncommit = 0; + atomic_set(&nfsi->commit_info.rpcs_out, 0); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ sema_init(&nfsi->rmdir_sem, 1); ++#else + init_rwsem(&nfsi->rmdir_sem); ++#endif + nfs4_init_once(nfsi); + } + +diff -Nur linux-4.9.28.orig/fs/nfs/nfs4_fs.h linux-4.9.28/fs/nfs/nfs4_fs.h +--- linux-4.9.28.orig/fs/nfs/nfs4_fs.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/nfs/nfs4_fs.h 2017-05-19 03:37:25.162176025 +0200 +@@ -111,7 +111,7 @@ + unsigned long so_flags; + struct list_head so_states; + struct nfs_seqid_counter so_seqid; +- seqcount_t so_reclaim_seqcount; ++ seqlock_t so_reclaim_seqlock; + struct mutex so_delegreturn_mutex; + }; + +diff -Nur linux-4.9.28.orig/fs/nfs/nfs4proc.c linux-4.9.28/fs/nfs/nfs4proc.c +--- linux-4.9.28.orig/fs/nfs/nfs4proc.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/nfs/nfs4proc.c 2017-05-19 03:37:25.162176025 +0200 +@@ -2695,7 +2695,7 @@ + unsigned int seq; + int ret; + +- seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); ++ seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount); + + ret = _nfs4_proc_open(opendata); + if (ret != 0) +@@ -2733,7 +2733,7 @@ + + if (d_inode(dentry) == state->inode) { + nfs_inode_attach_open_context(ctx); +- if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) ++ if (read_seqretry(&sp->so_reclaim_seqlock, seq)) + nfs4_schedule_stateid_recovery(server, state); + } + out: +diff -Nur linux-4.9.28.orig/fs/nfs/nfs4state.c linux-4.9.28/fs/nfs/nfs4state.c +--- linux-4.9.28.orig/fs/nfs/nfs4state.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/nfs/nfs4state.c 2017-05-19 03:37:25.162176025 +0200 +@@ -488,7 +488,7 @@ + nfs4_init_seqid_counter(&sp->so_seqid); + atomic_set(&sp->so_count, 1); + INIT_LIST_HEAD(&sp->so_lru); +- seqcount_init(&sp->so_reclaim_seqcount); ++ seqlock_init(&sp->so_reclaim_seqlock); + mutex_init(&sp->so_delegreturn_mutex); + return sp; + } +@@ -1497,8 +1497,12 @@ + * recovering after a network partition or a reboot from a + * server that doesn't support a grace period. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ write_seqlock(&sp->so_reclaim_seqlock); ++#else ++ write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount); ++#endif + spin_lock(&sp->so_lock); +- raw_write_seqcount_begin(&sp->so_reclaim_seqcount); + restart: + list_for_each_entry(state, &sp->so_states, open_states) { + if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) +@@ -1567,14 +1571,20 @@ + spin_lock(&sp->so_lock); + goto restart; + } +- raw_write_seqcount_end(&sp->so_reclaim_seqcount); + spin_unlock(&sp->so_lock); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ write_sequnlock(&sp->so_reclaim_seqlock); ++#else ++ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount); ++#endif + return 0; + out_err: + nfs4_put_open_state(state); +- spin_lock(&sp->so_lock); +- raw_write_seqcount_end(&sp->so_reclaim_seqcount); +- spin_unlock(&sp->so_lock); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ write_sequnlock(&sp->so_reclaim_seqlock); ++#else ++ write_seqcount_end(&sp->so_reclaim_seqlock.seqcount); ++#endif + return status; + } + +diff -Nur linux-4.9.28.orig/fs/nfs/unlink.c linux-4.9.28/fs/nfs/unlink.c +--- linux-4.9.28.orig/fs/nfs/unlink.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/nfs/unlink.c 2017-05-19 03:37:25.162176025 +0200 +@@ -12,7 +12,7 @@ + #include <linux/sunrpc/clnt.h> + #include <linux/nfs_fs.h> + #include <linux/sched.h> +-#include <linux/wait.h> ++#include <linux/swait.h> + #include <linux/namei.h> + #include <linux/fsnotify.h> + +@@ -51,6 +51,29 @@ + rpc_restart_call_prepare(task); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static void nfs_down_anon(struct semaphore *sema) ++{ ++ down(sema); ++} ++ ++static void nfs_up_anon(struct semaphore *sema) ++{ ++ up(sema); ++} ++ ++#else ++static void nfs_down_anon(struct rw_semaphore *rwsem) ++{ ++ down_read_non_owner(rwsem); ++} ++ ++static void nfs_up_anon(struct rw_semaphore *rwsem) ++{ ++ up_read_non_owner(rwsem); ++} ++#endif ++ + /** + * nfs_async_unlink_release - Release the sillydelete data. + * @task: rpc_task of the sillydelete +@@ -64,7 +87,7 @@ + struct dentry *dentry = data->dentry; + struct super_block *sb = dentry->d_sb; + +- up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); ++ nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); + d_lookup_done(dentry); + nfs_free_unlinkdata(data); + dput(dentry); +@@ -117,10 +140,10 @@ + struct inode *dir = d_inode(dentry->d_parent); + struct dentry *alias; + +- down_read_non_owner(&NFS_I(dir)->rmdir_sem); ++ nfs_down_anon(&NFS_I(dir)->rmdir_sem); + alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq); + if (IS_ERR(alias)) { +- up_read_non_owner(&NFS_I(dir)->rmdir_sem); ++ nfs_up_anon(&NFS_I(dir)->rmdir_sem); + return 0; + } + if (!d_in_lookup(alias)) { +@@ -142,7 +165,7 @@ + ret = 0; + spin_unlock(&alias->d_lock); + dput(alias); +- up_read_non_owner(&NFS_I(dir)->rmdir_sem); ++ nfs_up_anon(&NFS_I(dir)->rmdir_sem); + /* + * If we'd displaced old cached devname, free it. 
At that + * point dentry is definitely not a root, so we won't need +@@ -182,7 +205,7 @@ + goto out_free_name; + } + data->res.dir_attr = &data->dir_attr; +- init_waitqueue_head(&data->wq); ++ init_swait_queue_head(&data->wq); + + status = -EBUSY; + spin_lock(&dentry->d_lock); +diff -Nur linux-4.9.28.orig/fs/ntfs/aops.c linux-4.9.28/fs/ntfs/aops.c +--- linux-4.9.28.orig/fs/ntfs/aops.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/ntfs/aops.c 2017-05-19 03:37:25.162176025 +0200 +@@ -92,13 +92,13 @@ + ofs = 0; + if (file_ofs < init_size) + ofs = init_size - file_ofs; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + kaddr = kmap_atomic(page); + memset(kaddr + bh_offset(bh) + ofs, 0, + bh->b_size - ofs); + flush_dcache_page(page); + kunmap_atomic(kaddr); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + } else { + clear_buffer_uptodate(bh); +@@ -107,8 +107,7 @@ + "0x%llx.", (unsigned long long)bh->b_blocknr); + } + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -123,8 +122,7 @@ + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + /* + * If none of the buffers had errors then we can set the page uptodate, + * but we first have to perform the post read mst fixups, if the +@@ -145,13 +143,13 @@ + recs = PAGE_SIZE / rec_size; + /* Should have been verified before we got here... */ + BUG_ON(!recs); +- local_irq_save(flags); ++ local_irq_save_nort(flags); + kaddr = kmap_atomic(page); + for (i = 0; i < recs; i++) + post_read_mst_fixup((NTFS_RECORD*)(kaddr + + i * rec_size), rec_size); + kunmap_atomic(kaddr); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + flush_dcache_page(page); + if (likely(page_uptodate && !PageError(page))) + SetPageUptodate(page); +@@ -159,9 +157,7 @@ + unlock_page(page); + return; + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /** +diff -Nur linux-4.9.28.orig/fs/proc/base.c linux-4.9.28/fs/proc/base.c +--- linux-4.9.28.orig/fs/proc/base.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/proc/base.c 2017-05-19 03:37:25.162176025 +0200 +@@ -1834,7 +1834,7 @@ + + child = d_hash_and_lookup(dir, &qname); + if (!child) { +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + child = d_alloc_parallel(dir, &qname, &wq); + if (IS_ERR(child)) + goto end_instantiate; +diff -Nur linux-4.9.28.orig/fs/proc/proc_sysctl.c linux-4.9.28/fs/proc/proc_sysctl.c +--- linux-4.9.28.orig/fs/proc/proc_sysctl.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/proc/proc_sysctl.c 2017-05-19 03:37:25.162176025 +0200 +@@ -632,7 +632,7 @@ + + child = d_lookup(dir, &qname); + if (!child) { +- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); + child = d_alloc_parallel(dir, &qname, &wq); + if (IS_ERR(child)) + return false; +diff -Nur linux-4.9.28.orig/fs/timerfd.c linux-4.9.28/fs/timerfd.c +--- linux-4.9.28.orig/fs/timerfd.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/fs/timerfd.c 2017-05-19 03:37:25.162176025 +0200 +@@ -471,7 +471,10 @@ + break; + } + spin_unlock_irq(&ctx->wqh.lock); +- cpu_relax(); ++ if (isalarm(ctx)) ++ 
hrtimer_wait_for_timer(&ctx->t.alarm.timer); ++ else ++ hrtimer_wait_for_timer(&ctx->t.tmr); + } + + /* +diff -Nur linux-4.9.28.orig/include/acpi/platform/aclinux.h linux-4.9.28/include/acpi/platform/aclinux.h +--- linux-4.9.28.orig/include/acpi/platform/aclinux.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/acpi/platform/aclinux.h 2017-05-19 03:37:25.166176182 +0200 +@@ -133,6 +133,7 @@ + + #define acpi_cache_t struct kmem_cache + #define acpi_spinlock spinlock_t * ++#define acpi_raw_spinlock raw_spinlock_t * + #define acpi_cpu_flags unsigned long + + /* Use native linux version of acpi_os_allocate_zeroed */ +@@ -151,6 +152,20 @@ + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock + ++#define acpi_os_create_raw_lock(__handle) \ ++({ \ ++ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ ++ \ ++ if (lock) { \ ++ *(__handle) = lock; \ ++ raw_spin_lock_init(*(__handle)); \ ++ } \ ++ lock ? AE_OK : AE_NO_MEMORY; \ ++ }) ++ ++#define acpi_os_delete_raw_lock(__handle) kfree(__handle) ++ ++ + /* + * OSL interfaces used by debugger/disassembler + */ +diff -Nur linux-4.9.28.orig/include/asm-generic/bug.h linux-4.9.28/include/asm-generic/bug.h +--- linux-4.9.28.orig/include/asm-generic/bug.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/asm-generic/bug.h 2017-05-19 03:37:25.166176182 +0200 +@@ -215,6 +215,20 @@ + # define WARN_ON_SMP(x) ({0;}) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define BUG_ON_RT(c) BUG_ON(c) ++# define BUG_ON_NONRT(c) do { } while (0) ++# define WARN_ON_RT(condition) WARN_ON(condition) ++# define WARN_ON_NONRT(condition) do { } while (0) ++# define WARN_ON_ONCE_NONRT(condition) do { } while (0) ++#else ++# define BUG_ON_RT(c) do { } while (0) ++# define BUG_ON_NONRT(c) BUG_ON(c) ++# define WARN_ON_RT(condition) do { } while (0) ++# define WARN_ON_NONRT(condition) WARN_ON(condition) ++# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) ++#endif ++ + #endif /* __ASSEMBLY__ */ + + #endif +diff -Nur linux-4.9.28.orig/include/linux/blkdev.h linux-4.9.28/include/linux/blkdev.h +--- linux-4.9.28.orig/include/linux/blkdev.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/blkdev.h 2017-05-19 03:37:25.166176182 +0200 +@@ -89,6 +89,7 @@ + struct list_head queuelist; + union { + struct call_single_data csd; ++ struct work_struct work; + u64 fifo_time; + }; + +@@ -467,7 +468,7 @@ + struct throtl_data *td; + #endif + struct rcu_head rcu_head; +- wait_queue_head_t mq_freeze_wq; ++ struct swait_queue_head mq_freeze_wq; + struct percpu_ref q_usage_counter; + struct list_head all_q_node; + +diff -Nur linux-4.9.28.orig/include/linux/blk-mq.h linux-4.9.28/include/linux/blk-mq.h +--- linux-4.9.28.orig/include/linux/blk-mq.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/blk-mq.h 2017-05-19 03:37:25.166176182 +0200 +@@ -209,7 +209,7 @@ + return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; + } + +- ++void __blk_mq_complete_request_remote_work(struct work_struct *work); + int blk_mq_request_started(struct request *rq); + void blk_mq_start_request(struct request *rq); + void blk_mq_end_request(struct request *rq, int error); +diff -Nur linux-4.9.28.orig/include/linux/bottom_half.h linux-4.9.28/include/linux/bottom_half.h +--- linux-4.9.28.orig/include/linux/bottom_half.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/bottom_half.h 2017-05-19 03:37:25.166176182 +0200 +@@ -3,6 +3,39 @@ + + #include <linux/preempt.h> + ++#ifdef 
CONFIG_PREEMPT_RT_FULL ++ ++extern void __local_bh_disable(void); ++extern void _local_bh_enable(void); ++extern void __local_bh_enable(void); ++ ++static inline void local_bh_disable(void) ++{ ++ __local_bh_disable(); ++} ++ ++static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) ++{ ++ __local_bh_disable(); ++} ++ ++static inline void local_bh_enable(void) ++{ ++ __local_bh_enable(); ++} ++ ++static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) ++{ ++ __local_bh_enable(); ++} ++ ++static inline void local_bh_enable_ip(unsigned long ip) ++{ ++ __local_bh_enable(); ++} ++ ++#else ++ + #ifdef CONFIG_TRACE_IRQFLAGS + extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); + #else +@@ -30,5 +63,6 @@ + { + __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); + } ++#endif + + #endif /* _LINUX_BH_H */ +diff -Nur linux-4.9.28.orig/include/linux/buffer_head.h linux-4.9.28/include/linux/buffer_head.h +--- linux-4.9.28.orig/include/linux/buffer_head.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/buffer_head.h 2017-05-19 03:37:25.166176182 +0200 +@@ -75,8 +75,50 @@ + struct address_space *b_assoc_map; /* mapping this buffer is + associated with */ + atomic_t b_count; /* users using this buffer_head */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t b_uptodate_lock; ++#if IS_ENABLED(CONFIG_JBD2) ++ spinlock_t b_state_lock; ++ spinlock_t b_journal_head_lock; ++#endif ++#endif + }; + ++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) ++{ ++ unsigned long flags; ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ local_irq_save(flags); ++ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); ++#else ++ spin_lock_irqsave(&bh->b_uptodate_lock, flags); ++#endif ++ return flags; ++} ++ ++static inline void ++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) ++{ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); ++ local_irq_restore(flags); ++#else ++ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); ++#endif ++} ++ ++static inline void buffer_head_init_locks(struct buffer_head *bh) ++{ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spin_lock_init(&bh->b_uptodate_lock); ++#if IS_ENABLED(CONFIG_JBD2) ++ spin_lock_init(&bh->b_state_lock); ++ spin_lock_init(&bh->b_journal_head_lock); ++#endif ++#endif ++} ++ + /* + * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() + * and buffer_foo() functions. +diff -Nur linux-4.9.28.orig/include/linux/cgroup-defs.h linux-4.9.28/include/linux/cgroup-defs.h +--- linux-4.9.28.orig/include/linux/cgroup-defs.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/cgroup-defs.h 2017-05-19 03:37:25.166176182 +0200 +@@ -16,6 +16,7 @@ + #include <linux/percpu-refcount.h> + #include <linux/percpu-rwsem.h> + #include <linux/workqueue.h> ++#include <linux/swork.h> + + #ifdef CONFIG_CGROUPS + +@@ -137,6 +138,7 @@ + /* percpu_ref killing and RCU release */ + struct rcu_head rcu_head; + struct work_struct destroy_work; ++ struct swork_event destroy_swork; + }; + + /* +diff -Nur linux-4.9.28.orig/include/linux/completion.h linux-4.9.28/include/linux/completion.h +--- linux-4.9.28.orig/include/linux/completion.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/completion.h 2017-05-19 03:37:25.166176182 +0200 +@@ -7,8 +7,7 @@ + * Atomic wait-for-completion handler data structures. + * See kernel/sched/completion.c for details. 
+ */ +- +-#include <linux/wait.h> ++#include <linux/swait.h> + + /* + * struct completion - structure used to maintain state for a "completion" +@@ -24,11 +23,11 @@ + */ + struct completion { + unsigned int done; +- wait_queue_head_t wait; ++ struct swait_queue_head wait; + }; + + #define COMPLETION_INITIALIZER(work) \ +- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } ++ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } + + #define COMPLETION_INITIALIZER_ONSTACK(work) \ + ({ init_completion(&work); work; }) +@@ -73,7 +72,7 @@ + static inline void init_completion(struct completion *x) + { + x->done = 0; +- init_waitqueue_head(&x->wait); ++ init_swait_queue_head(&x->wait); + } + + /** +diff -Nur linux-4.9.28.orig/include/linux/cpu.h linux-4.9.28/include/linux/cpu.h +--- linux-4.9.28.orig/include/linux/cpu.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/cpu.h 2017-05-19 03:37:25.166176182 +0200 +@@ -182,6 +182,8 @@ + extern void put_online_cpus(void); + extern void cpu_hotplug_disable(void); + extern void cpu_hotplug_enable(void); ++extern void pin_current_cpu(void); ++extern void unpin_current_cpu(void); + #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) + #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri) + #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) +@@ -199,6 +201,8 @@ + #define put_online_cpus() do { } while (0) + #define cpu_hotplug_disable() do { } while (0) + #define cpu_hotplug_enable() do { } while (0) ++static inline void pin_current_cpu(void) { } ++static inline void unpin_current_cpu(void) { } + #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) + #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) + /* These aren't inline functions due to a GCC bug. */ +diff -Nur linux-4.9.28.orig/include/linux/dcache.h linux-4.9.28/include/linux/dcache.h +--- linux-4.9.28.orig/include/linux/dcache.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/dcache.h 2017-05-19 03:37:25.166176182 +0200 +@@ -11,6 +11,7 @@ + #include <linux/rcupdate.h> + #include <linux/lockref.h> + #include <linux/stringhash.h> ++#include <linux/wait.h> + + struct path; + struct vfsmount; +@@ -100,7 +101,7 @@ + + union { + struct list_head d_lru; /* LRU list */ +- wait_queue_head_t *d_wait; /* in-lookup ones only */ ++ struct swait_queue_head *d_wait; /* in-lookup ones only */ + }; + struct list_head d_child; /* child of parent list */ + struct list_head d_subdirs; /* our children */ +@@ -230,7 +231,7 @@ + extern struct dentry * d_alloc(struct dentry *, const struct qstr *); + extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); + extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, +- wait_queue_head_t *); ++ struct swait_queue_head *); + extern struct dentry * d_splice_alias(struct inode *, struct dentry *); + extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); + extern struct dentry * d_exact_alias(struct dentry *, struct inode *); +diff -Nur linux-4.9.28.orig/include/linux/delay.h linux-4.9.28/include/linux/delay.h +--- linux-4.9.28.orig/include/linux/delay.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/delay.h 2017-05-19 03:37:25.166176182 +0200 +@@ -52,4 +52,10 @@ + msleep(seconds * 1000); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void cpu_chill(void); ++#else ++# define cpu_chill() cpu_relax() ++#endif ++ + #endif /* defined(_LINUX_DELAY_H) */ +diff -Nur linux-4.9.28.orig/include/linux/highmem.h 
linux-4.9.28/include/linux/highmem.h +--- linux-4.9.28.orig/include/linux/highmem.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/highmem.h 2017-05-19 03:37:25.166176182 +0200 +@@ -7,6 +7,7 @@ + #include <linux/mm.h> + #include <linux/uaccess.h> + #include <linux/hardirq.h> ++#include <linux/sched.h> + + #include <asm/cacheflush.h> + +@@ -65,7 +66,7 @@ + + static inline void *kmap_atomic(struct page *page) + { +- preempt_disable(); ++ preempt_disable_nort(); + pagefault_disable(); + return page_address(page); + } +@@ -74,7 +75,7 @@ + static inline void __kunmap_atomic(void *addr) + { + pagefault_enable(); +- preempt_enable(); ++ preempt_enable_nort(); + } + + #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) +@@ -86,32 +87,51 @@ + + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + ++#ifndef CONFIG_PREEMPT_RT_FULL + DECLARE_PER_CPU(int, __kmap_atomic_idx); ++#endif + + static inline int kmap_atomic_idx_push(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; + +-#ifdef CONFIG_DEBUG_HIGHMEM ++# ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && !irqs_disabled()); + BUG_ON(idx >= KM_TYPE_NR); +-#endif ++# endif + return idx; ++#else ++ current->kmap_idx++; ++ BUG_ON(current->kmap_idx > KM_TYPE_NR); ++ return current->kmap_idx - 1; ++#endif + } + + static inline int kmap_atomic_idx(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + return __this_cpu_read(__kmap_atomic_idx) - 1; ++#else ++ return current->kmap_idx - 1; ++#endif + } + + static inline void kmap_atomic_idx_pop(void) + { +-#ifdef CONFIG_DEBUG_HIGHMEM ++#ifndef CONFIG_PREEMPT_RT_FULL ++# ifdef CONFIG_DEBUG_HIGHMEM + int idx = __this_cpu_dec_return(__kmap_atomic_idx); + + BUG_ON(idx < 0); +-#else ++# else + __this_cpu_dec(__kmap_atomic_idx); ++# endif ++#else ++ current->kmap_idx--; ++# ifdef CONFIG_DEBUG_HIGHMEM ++ BUG_ON(current->kmap_idx < 0); ++# endif + #endif + } + +diff -Nur linux-4.9.28.orig/include/linux/hrtimer.h linux-4.9.28/include/linux/hrtimer.h +--- linux-4.9.28.orig/include/linux/hrtimer.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/hrtimer.h 2017-05-19 03:37:25.166176182 +0200 +@@ -87,6 +87,9 @@ + * @function: timer expiry callback function + * @base: pointer to the timer base (per cpu and per clock) + * @state: state information (See bit values above) ++ * @cb_entry: list entry to defer timers from hardirq context ++ * @irqsafe: timer can run in hardirq context ++ * @praecox: timer expiry time if expired at the time of programming + * @is_rel: Set if the timer was armed relative + * @start_pid: timer statistics field to store the pid of the task which + * started the timer +@@ -103,6 +106,11 @@ + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; ++ struct list_head cb_entry; ++ int irqsafe; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ ktime_t praecox; ++#endif + u8 is_rel; + #ifdef CONFIG_TIMER_STATS + int start_pid; +@@ -123,11 +131,7 @@ + struct task_struct *task; + }; + +-#ifdef CONFIG_64BIT + # define HRTIMER_CLOCK_BASE_ALIGN 64 +-#else +-# define HRTIMER_CLOCK_BASE_ALIGN 32 +-#endif + + /** + * struct hrtimer_clock_base - the timer base for a specific clock +@@ -136,6 +140,7 @@ + * timer to a base on another cpu. + * @clockid: clock id for per_cpu support + * @active: red black tree root node for the active timers ++ * @expired: list head for deferred timers. 
+ * @get_time: function to retrieve the current time of the clock + * @offset: offset of this clock to the monotonic base + */ +@@ -144,6 +149,7 @@ + int index; + clockid_t clockid; + struct timerqueue_head active; ++ struct list_head expired; + ktime_t (*get_time)(void); + ktime_t offset; + } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); +@@ -187,6 +193,7 @@ + raw_spinlock_t lock; + seqcount_t seq; + struct hrtimer *running; ++ struct hrtimer *running_soft; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; +@@ -203,6 +210,9 @@ + unsigned int nr_hangs; + unsigned int max_hang_time; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ wait_queue_head_t wait; ++#endif + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; + } ____cacheline_aligned; + +@@ -412,6 +422,13 @@ + hrtimer_start_expires(timer, HRTIMER_MODE_ABS); + } + ++/* Softirq preemption could deadlock timer removal */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ extern void hrtimer_wait_for_timer(const struct hrtimer *timer); ++#else ++# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) ++#endif ++ + /* Query timers: */ + extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); + +@@ -436,9 +453,15 @@ + * Helper function to check, whether the timer is running the callback + * function + */ +-static inline int hrtimer_callback_running(struct hrtimer *timer) ++static inline int hrtimer_callback_running(const struct hrtimer *timer) + { +- return timer->base->cpu_base->running == timer; ++ if (timer->base->cpu_base->running == timer) ++ return 1; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ if (timer->base->cpu_base->running_soft == timer) ++ return 1; ++#endif ++ return 0; + } + + /* Forward a hrtimer so it expires after now: */ +diff -Nur linux-4.9.28.orig/include/linux/idr.h linux-4.9.28/include/linux/idr.h +--- linux-4.9.28.orig/include/linux/idr.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/idr.h 2017-05-19 03:37:25.166176182 +0200 +@@ -95,10 +95,14 @@ + * Each idr_preload() should be matched with an invocation of this + * function. See idr_preload() for details. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++void idr_preload_end(void); ++#else + static inline void idr_preload_end(void) + { + preempt_enable(); + } ++#endif + + /** + * idr_find - return pointer for given id +diff -Nur linux-4.9.28.orig/include/linux/init_task.h linux-4.9.28/include/linux/init_task.h +--- linux-4.9.28.orig/include/linux/init_task.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/init_task.h 2017-05-19 03:37:25.166176182 +0200 +@@ -150,6 +150,12 @@ + # define INIT_PERF_EVENTS(tsk) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define INIT_TIMER_LIST .posix_timer_list = NULL, ++#else ++# define INIT_TIMER_LIST ++#endif ++ + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + # define INIT_VTIME(tsk) \ + .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \ +@@ -164,6 +170,7 @@ + #ifdef CONFIG_RT_MUTEXES + # define INIT_RT_MUTEXES(tsk) \ + .pi_waiters = RB_ROOT, \ ++ .pi_top_task = NULL, \ + .pi_waiters_leftmost = NULL, + #else + # define INIT_RT_MUTEXES(tsk) +@@ -250,6 +257,7 @@ + .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ ++ INIT_TIMER_LIST \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ +diff -Nur linux-4.9.28.orig/include/linux/interrupt.h linux-4.9.28/include/linux/interrupt.h +--- linux-4.9.28.orig/include/linux/interrupt.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/interrupt.h 2017-05-19 03:37:25.166176182 +0200 +@@ -14,6 +14,7 @@ + #include <linux/hrtimer.h> + #include <linux/kref.h> + #include <linux/workqueue.h> ++#include <linux/swork.h> + + #include <linux/atomic.h> + #include <asm/ptrace.h> +@@ -61,6 +62,7 @@ + * interrupt handler after suspending interrupts. For system + * wakeup devices users need to implement wakeup detection in + * their interrupt handlers. ++ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) + */ + #define IRQF_SHARED 0x00000080 + #define IRQF_PROBE_SHARED 0x00000100 +@@ -74,6 +76,7 @@ + #define IRQF_NO_THREAD 0x00010000 + #define IRQF_EARLY_RESUME 0x00020000 + #define IRQF_COND_SUSPEND 0x00040000 ++#define IRQF_NO_SOFTIRQ_CALL 0x00080000 + + #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) + +@@ -196,7 +199,7 @@ + #ifdef CONFIG_LOCKDEP + # define local_irq_enable_in_hardirq() do { } while (0) + #else +-# define local_irq_enable_in_hardirq() local_irq_enable() ++# define local_irq_enable_in_hardirq() local_irq_enable_nort() + #endif + + extern void disable_irq_nosync(unsigned int irq); +@@ -216,6 +219,7 @@ + * struct irq_affinity_notify - context for notification of IRQ affinity changes + * @irq: Interrupt to which notification applies + * @kref: Reference count, for internal use ++ * @swork: Swork item, for internal use + * @work: Work item, for internal use + * @notify: Function to be called on change. This will be + * called in process context. 
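Both the aio free_ioctx_users() rework near the top of this section and the irq_affinity_notify changes above follow the same RT pattern: a callback that used to run from a non-preemptible context is handed off as a swork_event and executed later by a preemptible helper thread. As a rough userspace analogue of that hand-off (illustrative only — the queue, item, and function names below are invented, not kernel API):

/* Toy model of the RT "swork" hand-off: a context that must not block
 * pushes a node onto a lock-protected list; a helper thread runs the
 * callback later, in a context where it is free to sleep. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct swork_item {
	struct swork_item *next;
	void (*func)(struct swork_item *);
};

static struct swork_item *swork_head;
static pthread_mutex_t swork_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t swork_wake = PTHREAD_COND_INITIALIZER;

/* Producer side: a short lock hold and a pointer push, never the callback. */
static void swork_queue(struct swork_item *item)
{
	pthread_mutex_lock(&swork_lock);
	item->next = swork_head;
	swork_head = item;
	pthread_cond_signal(&swork_wake);
	pthread_mutex_unlock(&swork_lock);
}

/* Helper thread; this toy worker handles a single item and exits. */
static void *swork_thread(void *unused)
{
	struct swork_item *item;

	(void)unused;
	pthread_mutex_lock(&swork_lock);
	while (!swork_head)
		pthread_cond_wait(&swork_wake, &swork_lock);
	item = swork_head;
	swork_head = item->next;
	pthread_mutex_unlock(&swork_lock);

	item->func(item);
	return NULL;
}

static void deferred_free(struct swork_item *item)
{
	printf("deferred callback ran in thread context\n");
	free(item);
}

int main(void)
{
	pthread_t helper;
	struct swork_item *item = malloc(sizeof(*item));

	if (!item)
		return 1;
	item->func = deferred_free;
	pthread_create(&helper, NULL, swork_thread, NULL);
	swork_queue(item);
	pthread_join(helper, NULL);
	return 0;
}

The point of the pattern is that the producer side stays O(1) and never sleeps; only the helper thread, which is an ordinary schedulable task, runs the potentially blocking callback.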
+@@ -227,7 +231,11 @@ + struct irq_affinity_notify { + unsigned int irq; + struct kref kref; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct swork_event swork; ++#else + struct work_struct work; ++#endif + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); + void (*release)(struct kref *ref); + }; +@@ -406,9 +414,13 @@ + bool state); + + #ifdef CONFIG_IRQ_FORCED_THREADING ++# ifndef CONFIG_PREEMPT_RT_BASE + extern bool force_irqthreads; ++# else ++# define force_irqthreads (true) ++# endif + #else +-#define force_irqthreads (0) ++#define force_irqthreads (false) + #endif + + #ifndef __ARCH_SET_SOFTIRQ_PENDING +@@ -465,9 +477,10 @@ + void (*action)(struct softirq_action *); + }; + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +- ++static inline void thread_do_softirq(void) { do_softirq(); } + #ifdef __ARCH_HAS_DO_SOFTIRQ + void do_softirq_own_stack(void); + #else +@@ -476,13 +489,25 @@ + __do_softirq(); + } + #endif ++#else ++extern void thread_do_softirq(void); ++#endif + + extern void open_softirq(int nr, void (*action)(struct softirq_action *)); + extern void softirq_init(void); + extern void __raise_softirq_irqoff(unsigned int nr); ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void __raise_softirq_irqoff_ksoft(unsigned int nr); ++#else ++static inline void __raise_softirq_irqoff_ksoft(unsigned int nr) ++{ ++ __raise_softirq_irqoff(nr); ++} ++#endif + + extern void raise_softirq_irqoff(unsigned int nr); + extern void raise_softirq(unsigned int nr); ++extern void softirq_check_pending_idle(void); + + DECLARE_PER_CPU(struct task_struct *, ksoftirqd); + +@@ -504,8 +529,9 @@ + to be executed on some cpu at least once after this. + * If the tasklet is already scheduled, but its execution is still not + started, it will be executed only once. +- * If this tasklet is already running on another CPU (or schedule is called +- from tasklet itself), it is rescheduled for later. ++ * If this tasklet is already running on another CPU, it is rescheduled ++ for later. ++ * Schedule must not be called from the tasklet itself (a lockup occurs) + * Tasklet is strictly serialized wrt itself, but not + wrt another tasklets. If client needs some intertask synchronization, + he makes it with spinlocks. 
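The tasklet rework in the next hunk hinges on the new tasklet_tryunlock(): the state word is cleared with a cmpxchg, so the unlock attempt fails whenever another CPU managed to set TASKLET_STATE_PENDING while the tasklet was running, and the caller knows it must process the tasklet again. A self-contained C11 model of that state machine (the bit values mirror the patch; main() and the printouts are of course illustrative):

/* Model of the RT tasklet state word. tasklet_tryunlock() succeeds only
 * when the state is exactly RUN: any bit set concurrently makes the
 * compare-and-swap fail, forcing a re-run instead of a lost wakeup. */
#include <stdatomic.h>
#include <stdio.h>

#define STATEF_SCHED   (1u << 0)	/* TASKLET_STATEF_SCHED in the patch */
#define STATEF_RUN     (1u << 1)	/* TASKLET_STATEF_RUN */
#define STATEF_PENDING (1u << 2)	/* TASKLET_STATEF_PENDING */

static int tasklet_trylock(atomic_uint *state)
{
	/* test_and_set_bit(RUN): old RUN bit clear means we got the lock */
	return !(atomic_fetch_or(state, STATEF_RUN) & STATEF_RUN);
}

static int tasklet_tryunlock(atomic_uint *state)
{
	unsigned int expected = STATEF_RUN;

	/* cmpxchg(&t->state, RUN, 0): fails if any other bit appeared */
	return atomic_compare_exchange_strong(state, &expected, 0);
}

int main(void)
{
	atomic_uint state = 0;

	if (tasklet_trylock(&state))
		printf("locked, state=%u\n", atomic_load(&state));

	/* another CPU re-schedules the tasklet while it is running */
	atomic_fetch_or(&state, STATEF_PENDING);

	if (!tasklet_tryunlock(&state))
		printf("unlock refused: PENDING set, must re-run\n");

	atomic_fetch_and(&state, ~STATEF_PENDING);
	if (tasklet_tryunlock(&state))
		printf("unlocked cleanly\n");
	return 0;
}

This is also why the comment above now forbids scheduling a tasklet from within itself: the RT serialization is built on this lock word rather than on the implicit non-preemption that softirq context used to provide.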
+@@ -530,27 +556,36 @@ + enum + { + TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ +- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_PENDING /* Tasklet is pending */ + }; + +-#ifdef CONFIG_SMP ++#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) ++#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) ++#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) ++ ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + static inline int tasklet_trylock(struct tasklet_struct *t) + { + return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); + } + ++static inline int tasklet_tryunlock(struct tasklet_struct *t) ++{ ++ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; ++} ++ + static inline void tasklet_unlock(struct tasklet_struct *t) + { + smp_mb__before_atomic(); + clear_bit(TASKLET_STATE_RUN, &(t)->state); + } + +-static inline void tasklet_unlock_wait(struct tasklet_struct *t) +-{ +- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } +-} ++extern void tasklet_unlock_wait(struct tasklet_struct *t); ++ + #else + #define tasklet_trylock(t) 1 ++#define tasklet_tryunlock(t) 1 + #define tasklet_unlock_wait(t) do { } while (0) + #define tasklet_unlock(t) do { } while (0) + #endif +@@ -599,12 +634,7 @@ + smp_mb(); + } + +-static inline void tasklet_enable(struct tasklet_struct *t) +-{ +- smp_mb__before_atomic(); +- atomic_dec(&t->count); +-} +- ++extern void tasklet_enable(struct tasklet_struct *t); + extern void tasklet_kill(struct tasklet_struct *t); + extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); + extern void tasklet_init(struct tasklet_struct *t, +@@ -635,6 +665,12 @@ + tasklet_kill(&ttimer->tasklet); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void softirq_early_init(void); ++#else ++static inline void softirq_early_init(void) { } ++#endif ++ + /* + * Autoprobing for irqs: + * +diff -Nur linux-4.9.28.orig/include/linux/irqdesc.h linux-4.9.28/include/linux/irqdesc.h +--- linux-4.9.28.orig/include/linux/irqdesc.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/irqdesc.h 2017-05-19 03:37:25.166176182 +0200 +@@ -66,6 +66,7 @@ + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; ++ u64 random_ip; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + const struct cpumask *percpu_affinity; +diff -Nur linux-4.9.28.orig/include/linux/irqflags.h linux-4.9.28/include/linux/irqflags.h +--- linux-4.9.28.orig/include/linux/irqflags.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/irqflags.h 2017-05-19 03:37:25.166176182 +0200 +@@ -25,8 +25,6 @@ + # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) + # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) + # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) + # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, + #else + # define trace_hardirqs_on() do { } while (0) +@@ -39,9 +37,15 @@ + # define trace_softirqs_enabled(p) 0 + # define trace_hardirq_enter() do { } while (0) + # define trace_hardirq_exit() do { } while (0) ++# define INIT_TRACE_IRQFLAGS ++#endif ++ ++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) ++# define lockdep_softirq_enter() do { 
current->softirq_context++; } while (0) ++# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) ++#else + # define lockdep_softirq_enter() do { } while (0) + # define lockdep_softirq_exit() do { } while (0) +-# define INIT_TRACE_IRQFLAGS + #endif + + #if defined(CONFIG_IRQSOFF_TRACER) || \ +@@ -148,4 +152,23 @@ + + #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) + ++/* ++ * local_irq* variants depending on RT/!RT ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define local_irq_disable_nort() do { } while (0) ++# define local_irq_enable_nort() do { } while (0) ++# define local_irq_save_nort(flags) local_save_flags(flags) ++# define local_irq_restore_nort(flags) (void)(flags) ++# define local_irq_disable_rt() local_irq_disable() ++# define local_irq_enable_rt() local_irq_enable() ++#else ++# define local_irq_disable_nort() local_irq_disable() ++# define local_irq_enable_nort() local_irq_enable() ++# define local_irq_save_nort(flags) local_irq_save(flags) ++# define local_irq_restore_nort(flags) local_irq_restore(flags) ++# define local_irq_disable_rt() do { } while (0) ++# define local_irq_enable_rt() do { } while (0) ++#endif ++ + #endif +diff -Nur linux-4.9.28.orig/include/linux/irq.h linux-4.9.28/include/linux/irq.h +--- linux-4.9.28.orig/include/linux/irq.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/irq.h 2017-05-19 03:37:25.166176182 +0200 +@@ -72,6 +72,7 @@ + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude + * it from the spurious interrupt detection + * mechanism and from core side polling. ++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) + * IRQ_DISABLE_UNLAZY - Disable lazy irq disable + */ + enum { +@@ -99,13 +100,14 @@ + IRQ_PER_CPU_DEVID = (1 << 17), + IRQ_IS_POLLED = (1 << 18), + IRQ_DISABLE_UNLAZY = (1 << 19), ++ IRQ_NO_SOFTIRQ_CALL = (1 << 20), + }; + + #define IRQF_MODIFY_MASK \ + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ +- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) ++ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL) + + #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) + +diff -Nur linux-4.9.28.orig/include/linux/irq_work.h linux-4.9.28/include/linux/irq_work.h +--- linux-4.9.28.orig/include/linux/irq_work.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/irq_work.h 2017-05-19 03:37:25.166176182 +0200 +@@ -16,6 +16,7 @@ + #define IRQ_WORK_BUSY 2UL + #define IRQ_WORK_FLAGS 3UL + #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ ++#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */ + + struct irq_work { + unsigned long flags; +@@ -51,4 +52,10 @@ + static inline void irq_work_run(void) { } + #endif + ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) ++void irq_work_tick_soft(void); ++#else ++static inline void irq_work_tick_soft(void) { } ++#endif ++ + #endif /* _LINUX_IRQ_WORK_H */ +diff -Nur linux-4.9.28.orig/include/linux/jbd2.h linux-4.9.28/include/linux/jbd2.h +--- linux-4.9.28.orig/include/linux/jbd2.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/jbd2.h 2017-05-19 03:37:25.166176182 +0200 +@@ -347,32 +347,56 @@ + + static inline void jbd_lock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_State, &bh->b_state); ++#else ++ spin_lock(&bh->b_state_lock); ++#endif + } + + 
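The jbd_lock_bh_state() conversion above shows why the patch adds b_state_lock and b_journal_head_lock to struct buffer_head: bit_spin_lock() embeds the lock in a single bit of b_state and busy-waits with preemption disabled, which under PREEMPT_RT could block a higher-priority task for an unbounded time, so a real spinlock_t (a sleeping lock on RT) is placed next to the state word instead. For reference, a minimal userspace model of such a bit spinlock using C11 atomics (illustrative only, not the kernel implementation):

/* A lock stored in bit 0 of the word it protects, like BH_State in
 * bh->b_state. Waiters burn CPU until the bit clears -- the behaviour
 * the RT patch is trading away for preemptibility. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define STATE_LOCK_BIT 0x1u

static atomic_uint b_state;		/* flag bits plus the lock bit */
static unsigned long protected_counter;	/* guarded by the lock bit */

static void bit_spin_lock(atomic_uint *word)
{
	while (atomic_fetch_or_explicit(word, STATE_LOCK_BIT,
					memory_order_acquire) & STATE_LOCK_BIT)
		; /* spin; in the kernel this happens with preemption off */
}

static void bit_spin_unlock(atomic_uint *word)
{
	atomic_fetch_and_explicit(word, ~STATE_LOCK_BIT,
				  memory_order_release);
}

static void *worker(void *unused)
{
	(void)unused;
	for (int i = 0; i < 100000; i++) {
		bit_spin_lock(&b_state);
		protected_counter++;
		bit_spin_unlock(&b_state);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("counter = %lu (expect 200000)\n", protected_counter);
	return 0;
}

Two threads hammering the counter demonstrate the mutual exclusion; the trade-off the patch makes by switching to spinlock_t is about bounded latency, not correctness.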
static inline int jbd_trylock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_trylock(BH_State, &bh->b_state); ++#else ++ return spin_trylock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_is_locked_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_is_locked(BH_State, &bh->b_state); ++#else ++ return spin_is_locked(&bh->b_state_lock); ++#endif + } + + static inline void jbd_unlock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_State, &bh->b_state); ++#else ++ spin_unlock(&bh->b_state_lock); ++#endif + } + + static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_JournalHead, &bh->b_state); ++#else ++ spin_lock(&bh->b_journal_head_lock); ++#endif + } + + static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_JournalHead, &bh->b_state); ++#else ++ spin_unlock(&bh->b_journal_head_lock); ++#endif + } + + #define J_ASSERT(assert) BUG_ON(!(assert)) +diff -Nur linux-4.9.28.orig/include/linux/kdb.h linux-4.9.28/include/linux/kdb.h +--- linux-4.9.28.orig/include/linux/kdb.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/kdb.h 2017-05-19 03:37:25.166176182 +0200 +@@ -167,6 +167,7 @@ + extern __printf(1, 2) int kdb_printf(const char *, ...); + typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); + ++#define in_kdb_printk() (kdb_trap_printk) + extern void kdb_init(int level); + + /* Access to kdb specific polling devices */ +@@ -201,6 +202,7 @@ + extern int kdb_unregister(char *); + #else /* ! CONFIG_KGDB_KDB */ + static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } ++#define in_kdb_printk() (0) + static inline void kdb_init(int level) {} + static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen) { return 0; } +diff -Nur linux-4.9.28.orig/include/linux/kernel.h linux-4.9.28/include/linux/kernel.h +--- linux-4.9.28.orig/include/linux/kernel.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/kernel.h 2017-05-19 03:37:25.166176182 +0200 +@@ -194,6 +194,9 @@ + */ + # define might_sleep() \ + do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) ++ ++# define might_sleep_no_state_check() \ ++ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) + # define sched_annotate_sleep() (current->task_state_change = 0) + #else + static inline void ___might_sleep(const char *file, int line, +@@ -201,6 +204,7 @@ + static inline void __might_sleep(const char *file, int line, + int preempt_offset) { } + # define might_sleep() do { might_resched(); } while (0) ++# define might_sleep_no_state_check() do { might_resched(); } while (0) + # define sched_annotate_sleep() do { } while (0) + #endif + +@@ -488,6 +492,7 @@ + SYSTEM_HALT, + SYSTEM_POWER_OFF, + SYSTEM_RESTART, ++ SYSTEM_SUSPEND, + } system_state; + + #define TAINT_PROPRIETARY_MODULE 0 +diff -Nur linux-4.9.28.orig/include/linux/list_bl.h linux-4.9.28/include/linux/list_bl.h +--- linux-4.9.28.orig/include/linux/list_bl.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/list_bl.h 2017-05-19 03:37:25.166176182 +0200 +@@ -2,6 +2,7 @@ + #define _LINUX_LIST_BL_H + + #include <linux/list.h> ++#include <linux/spinlock.h> + #include <linux/bit_spinlock.h> + + /* +@@ -32,13 +33,24 @@ + + struct hlist_bl_head { + struct hlist_bl_node *first; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ raw_spinlock_t lock; ++#endif + }; + + struct hlist_bl_node { + struct hlist_bl_node *next, **pprev; + }; +-#define INIT_HLIST_BL_HEAD(ptr) \ +- ((ptr)->first = NULL) ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++#define INIT_HLIST_BL_HEAD(h) \ ++do { \ ++ (h)->first = NULL; \ ++ raw_spin_lock_init(&(h)->lock); \ ++} while (0) ++#else ++#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL ++#endif + + static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) + { +@@ -118,12 +130,26 @@ + + static inline void hlist_bl_lock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(0, (unsigned long *)b); ++#else ++ raw_spin_lock(&b->lock); ++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) ++ __set_bit(0, (unsigned long *)b); ++#endif ++#endif + } + + static inline void hlist_bl_unlock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + __bit_spin_unlock(0, (unsigned long *)b); ++#else ++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) ++ __clear_bit(0, (unsigned long *)b); ++#endif ++ raw_spin_unlock(&b->lock); ++#endif + } + + static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) +diff -Nur linux-4.9.28.orig/include/linux/locallock.h linux-4.9.28/include/linux/locallock.h +--- linux-4.9.28.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/locallock.h 2017-05-19 03:37:25.166176182 +0200 +@@ -0,0 +1,278 @@ ++#ifndef _LINUX_LOCALLOCK_H ++#define _LINUX_LOCALLOCK_H ++ ++#include <linux/percpu.h> ++#include <linux/spinlock.h> ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define LL_WARN(cond) WARN_ON(cond) ++#else ++# define LL_WARN(cond) do { } while (0) ++#endif ++ ++/* ++ * per cpu lock based substitute 
for local_irq_*() ++ */ ++struct local_irq_lock { ++ spinlock_t lock; ++ struct task_struct *owner; ++ int nestcnt; ++ unsigned long flags; ++}; ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ ++ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ ++ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } ++ ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ ++ DECLARE_PER_CPU(struct local_irq_lock, lvar) ++ ++#define local_irq_lock_init(lvar) \ ++ do { \ ++ int __cpu; \ ++ for_each_possible_cpu(__cpu) \ ++ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ ++ } while (0) ++ ++/* ++ * spin_lock|trylock|unlock_local flavour that does not migrate disable ++ * used for __local_lock|trylock|unlock where get_local_var/put_local_var ++ * already takes care of the migrate_disable/enable ++ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define spin_lock_local(lock) rt_spin_lock__no_mg(lock) ++# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock) ++# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock) ++#else ++# define spin_lock_local(lock) spin_lock(lock) ++# define spin_trylock_local(lock) spin_trylock(lock) ++# define spin_unlock_local(lock) spin_unlock(lock) ++#endif ++ ++static inline void __local_lock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ spin_lock_local(&lv->lock); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ } ++ lv->nestcnt++; ++} ++ ++#define local_lock(lvar) \ ++ do { __local_lock(&get_local_var(lvar)); } while (0) ++ ++#define local_lock_on(lvar, cpu) \ ++ do { __local_lock(&per_cpu(lvar, cpu)); } while (0) ++ ++static inline int __local_trylock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current && spin_trylock_local(&lv->lock)) { ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++ return 1; ++ } ++ return 0; ++} ++ ++#define local_trylock(lvar) \ ++ ({ \ ++ int __locked; \ ++ __locked = __local_trylock(&get_local_var(lvar)); \ ++ if (!__locked) \ ++ put_local_var(lvar); \ ++ __locked; \ ++ }) ++ ++static inline void __local_unlock(struct local_irq_lock *lv) ++{ ++ LL_WARN(lv->nestcnt == 0); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return; ++ ++ lv->owner = NULL; ++ spin_unlock_local(&lv->lock); ++} ++ ++#define local_unlock(lvar) \ ++ do { \ ++ __local_unlock(this_cpu_ptr(&lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_on(lvar, cpu) \ ++ do { __local_unlock(&per_cpu(lvar, cpu)); } while (0) ++ ++static inline void __local_lock_irq(struct local_irq_lock *lv) ++{ ++ spin_lock_irqsave(&lv->lock, lv->flags); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++} ++ ++#define local_lock_irq(lvar) \ ++ do { __local_lock_irq(&get_local_var(lvar)); } while (0) ++ ++#define local_lock_irq_on(lvar, cpu) \ ++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) ++ ++static inline void __local_unlock_irq(struct local_irq_lock *lv) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ lv->owner = NULL; ++ lv->nestcnt = 0; ++ spin_unlock_irq(&lv->lock); ++} ++ ++#define local_unlock_irq(lvar) \ ++ do { \ ++ __local_unlock_irq(this_cpu_ptr(&lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_irq_on(lvar, cpu) \ ++ do { \ ++ __local_unlock_irq(&per_cpu(lvar, cpu)); \ ++ } while (0) ++ ++static inline int __local_lock_irqsave(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ __local_lock_irq(lv); ++ return 0; ++ } else 
{ ++ lv->nestcnt++; ++ return 1; ++ } ++} ++ ++#define local_lock_irqsave(lvar, _flags) \ ++ do { \ ++ if (__local_lock_irqsave(&get_local_var(lvar))) \ ++ put_local_var(lvar); \ ++ _flags = __this_cpu_read(lvar.flags); \ ++ } while (0) ++ ++#define local_lock_irqsave_on(lvar, _flags, cpu) \ ++ do { \ ++ __local_lock_irqsave(&per_cpu(lvar, cpu)); \ ++ _flags = per_cpu(lvar, cpu).flags; \ ++ } while (0) ++ ++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, ++ unsigned long flags) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return 0; ++ ++ lv->owner = NULL; ++ spin_unlock_irqrestore(&lv->lock, lv->flags); ++ return 1; ++} ++ ++#define local_unlock_irqrestore(lvar, flags) \ ++ do { \ ++ if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_irqrestore_on(lvar, flags, cpu) \ ++ do { \ ++ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ ++ } while (0) ++ ++#define local_spin_trylock_irq(lvar, lock) \ ++ ({ \ ++ int __locked; \ ++ local_lock_irq(lvar); \ ++ __locked = spin_trylock(lock); \ ++ if (!__locked) \ ++ local_unlock_irq(lvar); \ ++ __locked; \ ++ }) ++ ++#define local_spin_lock_irq(lvar, lock) \ ++ do { \ ++ local_lock_irq(lvar); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irq(lvar, lock) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irq(lvar); \ ++ } while (0) ++ ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ do { \ ++ local_lock_irqsave(lvar, flags); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irqrestore(lvar, flags); \ ++ } while (0) ++ ++#define get_locked_var(lvar, var) \ ++ (*({ \ ++ local_lock(lvar); \ ++ this_cpu_ptr(&var); \ ++ })) ++ ++#define put_locked_var(lvar, var) local_unlock(lvar); ++ ++#define local_lock_cpu(lvar) \ ++ ({ \ ++ local_lock(lvar); \ ++ smp_processor_id(); \ ++ }) ++ ++#define local_unlock_cpu(lvar) local_unlock(lvar) ++ ++#else /* PREEMPT_RT_BASE */ ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar ++ ++static inline void local_irq_lock_init(int lvar) { } ++ ++#define local_lock(lvar) preempt_disable() ++#define local_unlock(lvar) preempt_enable() ++#define local_lock_irq(lvar) local_irq_disable() ++#define local_lock_irq_on(lvar, cpu) local_irq_disable() ++#define local_unlock_irq(lvar) local_irq_enable() ++#define local_unlock_irq_on(lvar, cpu) local_irq_enable() ++#define local_lock_irqsave(lvar, flags) local_irq_save(flags) ++#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) ++ ++#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) ++#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) ++#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ spin_lock_irqsave(lock, flags) ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ spin_unlock_irqrestore(lock, flags) ++ ++#define get_locked_var(lvar, var) get_cpu_var(var) ++#define put_locked_var(lvar, var) put_cpu_var(var) ++ ++#define local_lock_cpu(lvar) get_cpu() ++#define local_unlock_cpu(lvar) put_cpu() ++ ++#endif ++ ++#endif +diff -Nur linux-4.9.28.orig/include/linux/mm_types.h linux-4.9.28/include/linux/mm_types.h +--- linux-4.9.28.orig/include/linux/mm_types.h 2017-05-14 14:00:37.000000000 +0200 ++++ 
linux-4.9.28/include/linux/mm_types.h 2017-05-19 03:37:25.170176339 +0200 +@@ -11,6 +11,7 @@ + #include <linux/completion.h> + #include <linux/cpumask.h> + #include <linux/uprobes.h> ++#include <linux/rcupdate.h> + #include <linux/page-flags-layout.h> + #include <linux/workqueue.h> + #include <asm/page.h> +@@ -509,6 +510,9 @@ + bool tlb_flush_pending; + #endif + struct uprobes_state uprobes_state; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head delayed_drop; ++#endif + #ifdef CONFIG_X86_INTEL_MPX + /* address of the bounds directory */ + void __user *bd_addr; +diff -Nur linux-4.9.28.orig/include/linux/module.h linux-4.9.28/include/linux/module.h +--- linux-4.9.28.orig/include/linux/module.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/module.h 2017-05-19 03:37:25.170176339 +0200 +@@ -496,6 +496,7 @@ + struct module *__module_text_address(unsigned long addr); + struct module *__module_address(unsigned long addr); + bool is_module_address(unsigned long addr); ++bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr); + bool is_module_percpu_address(unsigned long addr); + bool is_module_text_address(unsigned long addr); + +@@ -662,6 +663,11 @@ + { + return false; + } ++ ++static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) ++{ ++ return false; ++} + + static inline bool is_module_text_address(unsigned long addr) + { +diff -Nur linux-4.9.28.orig/include/linux/mutex.h linux-4.9.28/include/linux/mutex.h +--- linux-4.9.28.orig/include/linux/mutex.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/mutex.h 2017-05-19 03:37:25.170176339 +0200 +@@ -19,6 +19,17 @@ + #include <asm/processor.h> + #include <linux/osq_lock.h> + ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ ++ , .dep_map = { .name = #lockname } ++#else ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) ++#endif ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include <linux/mutex_rt.h> ++#else ++ + /* + * Simple, straightforward mutexes with strict semantics: + * +@@ -99,13 +110,6 @@ + static inline void mutex_destroy(struct mutex *lock) {} + #endif + +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ +- , .dep_map = { .name = #lockname } +-#else +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +-#endif +- + #define __MUTEX_INITIALIZER(lockname) \ + { .count = ATOMIC_INIT(1) \ + , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ +@@ -173,6 +177,8 @@ + extern int mutex_trylock(struct mutex *lock); + extern void mutex_unlock(struct mutex *lock); + ++#endif /* !PREEMPT_RT_FULL */ ++ + extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + + #endif /* __LINUX_MUTEX_H */ +diff -Nur linux-4.9.28.orig/include/linux/mutex_rt.h linux-4.9.28/include/linux/mutex_rt.h +--- linux-4.9.28.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/mutex_rt.h 2017-05-19 03:37:25.170176339 +0200 +@@ -0,0 +1,89 @@ ++#ifndef __LINUX_MUTEX_RT_H ++#define __LINUX_MUTEX_RT_H ++ ++#ifndef __LINUX_MUTEX_H ++#error "Please include mutex.h" ++#endif ++ ++#include <linux/rtmutex.h> ++ ++/* FIXME: Just for __lockfunc */ ++#include <linux/spinlock.h> ++ ++struct mutex { ++ struct rt_mutex lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __MUTEX_INITIALIZER(mutexname) \ ++ { \ ++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ ++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ ++ } ++ ++#define 
DEFINE_MUTEX(mutexname) \
++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
++
++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
++extern void __lockfunc _mutex_lock(struct mutex *lock);
++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
++extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_trylock(struct mutex *lock);
++extern void __lockfunc _mutex_unlock(struct mutex *lock);
++
++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
++#define mutex_lock(l) _mutex_lock(l)
++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
++#define mutex_lock_killable(l) _mutex_lock_killable(l)
++#define mutex_trylock(l) _mutex_trylock(l)
++#define mutex_unlock(l) _mutex_unlock(l)
++
++#ifdef CONFIG_DEBUG_MUTEXES
++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
++#else
++static inline void mutex_destroy(struct mutex *lock) {}
++#endif
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible_nested(l, s)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable_nested(l, s)
++
++# define mutex_lock_nest_lock(lock, nest_lock) \
++do { \
++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
++} while (0)
++
++#else
++# define mutex_lock_nested(l, s) _mutex_lock(l)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible(l)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable(l)
++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
++#endif
++
++# define mutex_init(mutex) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), #mutex, &__key); \
++} while (0)
++
++# define __mutex_init(mutex, name, key) \
++do { \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), name, key); \
++} while (0)
++
++#endif
+diff -Nur linux-4.9.28.orig/include/linux/netdevice.h linux-4.9.28/include/linux/netdevice.h
+--- linux-4.9.28.orig/include/linux/netdevice.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/include/linux/netdevice.h 2017-05-19 03:37:25.170176339 +0200
+@@ -396,7 +396,19 @@
+ typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
+
+ void __napi_schedule(struct napi_struct *n);
++
++/*
++ * When PREEMPT_RT_FULL is defined, all device interrupt handlers
++ * run as threads and can themselves be preempted (without PREEMPT_RT,
++ * interrupt threads cannot be preempted). This means that a call to
++ * __napi_schedule_irqoff() from an interrupt handler can be preempted
++ * and can corrupt the napi->poll_list.
++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define __napi_schedule_irqoff(n) __napi_schedule(n) ++#else + void __napi_schedule_irqoff(struct napi_struct *n); ++#endif + + static inline bool napi_disable_pending(struct napi_struct *n) + { +@@ -2463,14 +2475,53 @@ + void synchronize_net(void); + int init_dummy_netdev(struct net_device *dev); + +-DECLARE_PER_CPU(int, xmit_recursion); + #define XMIT_RECURSION_LIMIT 10 ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int dev_recursion_level(void) ++{ ++ return current->xmit_recursion; ++} ++ ++static inline int xmit_rec_read(void) ++{ ++ return current->xmit_recursion; ++} ++ ++static inline void xmit_rec_inc(void) ++{ ++ current->xmit_recursion++; ++} ++ ++static inline void xmit_rec_dec(void) ++{ ++ current->xmit_recursion--; ++} ++ ++#else ++ ++DECLARE_PER_CPU(int, xmit_recursion); + + static inline int dev_recursion_level(void) + { + return this_cpu_read(xmit_recursion); + } + ++static inline int xmit_rec_read(void) ++{ ++ return __this_cpu_read(xmit_recursion); ++} ++ ++static inline void xmit_rec_inc(void) ++{ ++ __this_cpu_inc(xmit_recursion); ++} ++ ++static inline void xmit_rec_dec(void) ++{ ++ __this_cpu_dec(xmit_recursion); ++} ++#endif ++ + struct net_device *dev_get_by_index(struct net *net, int ifindex); + struct net_device *__dev_get_by_index(struct net *net, int ifindex); + struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); +@@ -2855,6 +2906,7 @@ + unsigned int dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; ++ struct sk_buff_head tofree_queue; + + }; + +diff -Nur linux-4.9.28.orig/include/linux/netfilter/x_tables.h linux-4.9.28/include/linux/netfilter/x_tables.h +--- linux-4.9.28.orig/include/linux/netfilter/x_tables.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/netfilter/x_tables.h 2017-05-19 03:37:25.170176339 +0200 +@@ -4,6 +4,7 @@ + + #include <linux/netdevice.h> + #include <linux/static_key.h> ++#include <linux/locallock.h> + #include <uapi/linux/netfilter/x_tables.h> + + /* Test a struct->invflags and a boolean for inequality */ +@@ -300,6 +301,8 @@ + */ + DECLARE_PER_CPU(seqcount_t, xt_recseq); + ++DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); ++ + /* xt_tee_enabled - true if x_tables needs to handle reentrancy + * + * Enabled if current ip(6)tables ruleset has at least one -j TEE rule. +@@ -320,6 +323,9 @@ + { + unsigned int addend; + ++ /* RT protection */ ++ local_lock(xt_write_lock); ++ + /* + * Low order bit of sequence is set if we already + * called xt_write_recseq_begin(). 
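[Editor's note] The xmit_rec_*() helpers in the netdevice.h hunk above exist so that, on PREEMPT_RT_FULL, the transmit-recursion count lives in the task rather than in a per-CPU variable: a preemptible sender can migrate between CPUs mid-transmit, which would make a per-CPU counter unreliable. A minimal sketch of the intended call pattern follows; hypothetical_direct_xmit() is an illustrative stand-in, not the actual caller in net/core/dev.c:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: guard a direct transmit against unbounded recursion. */
static netdev_tx_t hypothetical_direct_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	netdev_tx_t ret;

	if (xmit_rec_read() > XMIT_RECURSION_LIMIT) {
		kfree_skb(skb);		/* drop rather than recurse forever */
		return NETDEV_TX_OK;
	}

	xmit_rec_inc();			/* per-task on RT, per-CPU otherwise */
	ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
	xmit_rec_dec();

	return ret;
}

Because both variants hide behind the same accessors, callers need no #ifdefs of their own.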
+@@ -350,6 +356,7 @@ + /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ + smp_wmb(); + __this_cpu_add(xt_recseq.sequence, addend); ++ local_unlock(xt_write_lock); + } + + /* +diff -Nur linux-4.9.28.orig/include/linux/nfs_fs.h linux-4.9.28/include/linux/nfs_fs.h +--- linux-4.9.28.orig/include/linux/nfs_fs.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/nfs_fs.h 2017-05-19 03:37:25.170176339 +0200 +@@ -165,7 +165,11 @@ + + /* Readers: in-flight sillydelete RPC calls */ + /* Writers: rmdir */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct semaphore rmdir_sem; ++#else + struct rw_semaphore rmdir_sem; ++#endif + + #if IS_ENABLED(CONFIG_NFS_V4) + struct nfs4_cached_acl *nfs4_acl; +diff -Nur linux-4.9.28.orig/include/linux/nfs_xdr.h linux-4.9.28/include/linux/nfs_xdr.h +--- linux-4.9.28.orig/include/linux/nfs_xdr.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/nfs_xdr.h 2017-05-19 03:37:25.170176339 +0200 +@@ -1490,7 +1490,7 @@ + struct nfs_removeargs args; + struct nfs_removeres res; + struct dentry *dentry; +- wait_queue_head_t wq; ++ struct swait_queue_head wq; + struct rpc_cred *cred; + struct nfs_fattr dir_attr; + long timeout; +diff -Nur linux-4.9.28.orig/include/linux/notifier.h linux-4.9.28/include/linux/notifier.h +--- linux-4.9.28.orig/include/linux/notifier.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/notifier.h 2017-05-19 03:37:25.170176339 +0200 +@@ -6,7 +6,7 @@ + * + * Alan Cox <Alan.Cox@linux.org> + */ +- ++ + #ifndef _LINUX_NOTIFIER_H + #define _LINUX_NOTIFIER_H + #include <linux/errno.h> +@@ -42,9 +42,7 @@ + * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. + * As compensation, srcu_notifier_chain_unregister() is rather expensive. + * SRCU notifier chains should be used when the chain will be called very +- * often but notifier_blocks will seldom be removed. Also, SRCU notifier +- * chains are slightly more difficult to use because they require special +- * runtime initialization. ++ * often but notifier_blocks will seldom be removed. + */ + + struct notifier_block; +@@ -90,7 +88,7 @@ + (name)->head = NULL; \ + } while (0) + +-/* srcu_notifier_heads must be initialized and cleaned up dynamically */ ++/* srcu_notifier_heads must be cleaned up dynamically */ + extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); + #define srcu_cleanup_notifier_head(name) \ + cleanup_srcu_struct(&(name)->srcu); +@@ -103,7 +101,13 @@ + .head = NULL } + #define RAW_NOTIFIER_INIT(name) { \ + .head = NULL } +-/* srcu_notifier_heads cannot be initialized statically */ ++ ++#define SRCU_NOTIFIER_INIT(name, pcpu) \ ++ { \ ++ .mutex = __MUTEX_INITIALIZER(name.mutex), \ ++ .head = NULL, \ ++ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ ++ } + + #define ATOMIC_NOTIFIER_HEAD(name) \ + struct atomic_notifier_head name = \ +@@ -115,6 +119,18 @@ + struct raw_notifier_head name = \ + RAW_NOTIFIER_INIT(name) + ++#define _SRCU_NOTIFIER_HEAD(name, mod) \ ++ static DEFINE_PER_CPU(struct srcu_struct_array, \ ++ name##_head_srcu_array); \ ++ mod struct srcu_notifier_head name = \ ++ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) ++ ++#define SRCU_NOTIFIER_HEAD(name) \ ++ _SRCU_NOTIFIER_HEAD(name, ) ++ ++#define SRCU_NOTIFIER_HEAD_STATIC(name) \ ++ _SRCU_NOTIFIER_HEAD(name, static) ++ + #ifdef __KERNEL__ + + extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, +@@ -184,12 +200,12 @@ + + /* + * Declared notifiers so far. 
I can imagine quite a few more chains +- * over time (eg laptop power reset chains, reboot chain (to clean ++ * over time (eg laptop power reset chains, reboot chain (to clean + * device units up), device [un]mount chain, module load/unload chain, +- * low memory chain, screenblank chain (for plug in modular screenblankers) ++ * low memory chain, screenblank chain (for plug in modular screenblankers) + * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... + */ +- ++ + /* CPU notfiers are defined in include/linux/cpu.h. */ + + /* netdevice notifiers are defined in include/linux/netdevice.h */ +diff -Nur linux-4.9.28.orig/include/linux/percpu.h linux-4.9.28/include/linux/percpu.h +--- linux-4.9.28.orig/include/linux/percpu.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/percpu.h 2017-05-19 03:37:25.170176339 +0200 +@@ -18,6 +18,35 @@ + #define PERCPU_MODULE_RESERVE 0 + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++#define get_local_var(var) (*({ \ ++ migrate_disable(); \ ++ this_cpu_ptr(&var); })) ++ ++#define put_local_var(var) do { \ ++ (void)&(var); \ ++ migrate_enable(); \ ++} while (0) ++ ++# define get_local_ptr(var) ({ \ ++ migrate_disable(); \ ++ this_cpu_ptr(var); }) ++ ++# define put_local_ptr(var) do { \ ++ (void)(var); \ ++ migrate_enable(); \ ++} while (0) ++ ++#else ++ ++#define get_local_var(var) get_cpu_var(var) ++#define put_local_var(var) put_cpu_var(var) ++#define get_local_ptr(var) get_cpu_ptr(var) ++#define put_local_ptr(var) put_cpu_ptr(var) ++ ++#endif ++ + /* minimum unit size, also is the maximum supported allocation size */ + #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) + +@@ -110,6 +139,7 @@ + #endif + + extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); ++extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr); + extern bool is_kernel_percpu_address(unsigned long addr); + + #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) +diff -Nur linux-4.9.28.orig/include/linux/percpu-rwsem.h linux-4.9.28/include/linux/percpu-rwsem.h +--- linux-4.9.28.orig/include/linux/percpu-rwsem.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/percpu-rwsem.h 2017-05-19 03:37:25.170176339 +0200 +@@ -4,7 +4,7 @@ + #include <linux/atomic.h> + #include <linux/rwsem.h> + #include <linux/percpu.h> +-#include <linux/wait.h> ++#include <linux/swait.h> + #include <linux/rcu_sync.h> + #include <linux/lockdep.h> + +@@ -12,7 +12,7 @@ + struct rcu_sync rss; + unsigned int __percpu *read_count; + struct rw_semaphore rw_sem; +- wait_queue_head_t writer; ++ struct swait_queue_head writer; + int readers_block; + }; + +@@ -22,13 +22,13 @@ + .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \ + .read_count = &__percpu_rwsem_rc_##name, \ + .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ +- .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \ ++ .writer = __SWAIT_QUEUE_HEAD_INITIALIZER(name.writer), \ + } + + extern int __percpu_down_read(struct percpu_rw_semaphore *, int); + extern void __percpu_up_read(struct percpu_rw_semaphore *); + +-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem) ++static inline void percpu_down_read(struct percpu_rw_semaphore *sem) + { + might_sleep(); + +@@ -46,16 +46,10 @@ + __this_cpu_inc(*sem->read_count); + if (unlikely(!rcu_sync_is_idle(&sem->rss))) + __percpu_down_read(sem, false); /* Unconditional memory barrier */ +- barrier(); + /* +- * The barrier() prevents the compiler from ++ * The 
preempt_enable() prevents the compiler from + * bleeding the critical section out. + */ +-} +- +-static inline void percpu_down_read(struct percpu_rw_semaphore *sem) +-{ +- percpu_down_read_preempt_disable(sem); + preempt_enable(); + } + +@@ -82,13 +76,9 @@ + return ret; + } + +-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem) ++static inline void percpu_up_read(struct percpu_rw_semaphore *sem) + { +- /* +- * The barrier() prevents the compiler from +- * bleeding the critical section out. +- */ +- barrier(); ++ preempt_disable(); + /* + * Same as in percpu_down_read(). + */ +@@ -101,12 +91,6 @@ + rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); + } + +-static inline void percpu_up_read(struct percpu_rw_semaphore *sem) +-{ +- preempt_disable(); +- percpu_up_read_preempt_enable(sem); +-} +- + extern void percpu_down_write(struct percpu_rw_semaphore *); + extern void percpu_up_write(struct percpu_rw_semaphore *); + +diff -Nur linux-4.9.28.orig/include/linux/pid.h linux-4.9.28/include/linux/pid.h +--- linux-4.9.28.orig/include/linux/pid.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/pid.h 2017-05-19 03:37:25.170176339 +0200 +@@ -2,6 +2,7 @@ + #define _LINUX_PID_H + + #include <linux/rcupdate.h> ++#include <linux/atomic.h> + + enum pid_type + { +diff -Nur linux-4.9.28.orig/include/linux/preempt.h linux-4.9.28/include/linux/preempt.h +--- linux-4.9.28.orig/include/linux/preempt.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/preempt.h 2017-05-19 03:37:25.170176339 +0200 +@@ -50,7 +50,11 @@ + #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) + #define NMI_OFFSET (1UL << NMI_SHIFT) + +-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#else ++# define SOFTIRQ_DISABLE_OFFSET (0) ++#endif + + /* We use the MSB mostly because its available */ + #define PREEMPT_NEED_RESCHED 0x80000000 +@@ -59,9 +63,15 @@ + #include <asm/preempt.h> + + #define hardirq_count() (preempt_count() & HARDIRQ_MASK) +-#define softirq_count() (preempt_count() & SOFTIRQ_MASK) + #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define softirq_count() (preempt_count() & SOFTIRQ_MASK) ++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) ++#else ++# define softirq_count() (0UL) ++extern int in_serving_softirq(void); ++#endif + + /* + * Are we doing bottom half or hardware interrupt processing? +@@ -72,7 +82,6 @@ + #define in_irq() (hardirq_count()) + #define in_softirq() (softirq_count()) + #define in_interrupt() (irq_count()) +-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + + /* + * Are we in NMI context? 
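[Editor's note] The get_local_var()/put_local_var() pair from the percpu.h hunk above, together with the local_irq_lock machinery from the locallock.h header this patch adds, is what lets formerly preempt-disabled per-CPU sections become sleepable on RT. A minimal usage sketch under stated assumptions — stats_lock and stats_count are hypothetical names invented for illustration:

#include <linux/locallock.h>	/* header added by this patch */
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(stats_lock);	/* hypothetical lock */
static DEFINE_PER_CPU(unsigned long, stats_count);

static void stats_inc(void)
{
	/*
	 * On RT: migrate_disable() plus a sleeping per-CPU spinlock,
	 * so the section stays preemptible. Without RT this collapses
	 * to a plain preempt_disable()/preempt_enable() pair.
	 */
	local_lock(stats_lock);
	__this_cpu_inc(stats_count);
	local_unlock(stats_lock);
}

The design trade-off: on RT the critical section may sleep, so this pattern must not be used from contexts that cannot block.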
+@@ -91,7 +100,11 @@ + /* + * The preempt_count offset after spin_lock() + */ ++#if !defined(CONFIG_PREEMPT_RT_FULL) + #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET ++#else ++#define PREEMPT_LOCK_OFFSET 0 ++#endif + + /* + * The preempt_count offset needed for things like: +@@ -140,6 +153,20 @@ + #define preempt_count_inc() preempt_count_add(1) + #define preempt_count_dec() preempt_count_sub(1) + ++#ifdef CONFIG_PREEMPT_LAZY ++#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) ++#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) ++#define inc_preempt_lazy_count() add_preempt_lazy_count(1) ++#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) ++#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) ++#else ++#define add_preempt_lazy_count(val) do { } while (0) ++#define sub_preempt_lazy_count(val) do { } while (0) ++#define inc_preempt_lazy_count() do { } while (0) ++#define dec_preempt_lazy_count() do { } while (0) ++#define preempt_lazy_count() (0) ++#endif ++ + #ifdef CONFIG_PREEMPT_COUNT + + #define preempt_disable() \ +@@ -148,13 +175,25 @@ + barrier(); \ + } while (0) + ++#define preempt_lazy_disable() \ ++do { \ ++ inc_preempt_lazy_count(); \ ++ barrier(); \ ++} while (0) ++ + #define sched_preempt_enable_no_resched() \ + do { \ + barrier(); \ + preempt_count_dec(); \ + } while (0) + +-#define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++# define preempt_check_resched_rt() preempt_check_resched() ++#else ++# define preempt_enable_no_resched() preempt_enable() ++# define preempt_check_resched_rt() barrier(); ++#endif + + #define preemptible() (preempt_count() == 0 && !irqs_disabled()) + +@@ -179,6 +218,13 @@ + __preempt_schedule(); \ + } while (0) + ++#define preempt_lazy_enable() \ ++do { \ ++ dec_preempt_lazy_count(); \ ++ barrier(); \ ++ preempt_check_resched(); \ ++} while (0) ++ + #else /* !CONFIG_PREEMPT */ + #define preempt_enable() \ + do { \ +@@ -224,6 +270,7 @@ + #define preempt_disable_notrace() barrier() + #define preempt_enable_no_resched_notrace() barrier() + #define preempt_enable_notrace() barrier() ++#define preempt_check_resched_rt() barrier() + #define preemptible() 0 + + #endif /* CONFIG_PREEMPT_COUNT */ +@@ -244,10 +291,31 @@ + } while (0) + #define preempt_fold_need_resched() \ + do { \ +- if (tif_need_resched()) \ ++ if (tif_need_resched_now()) \ + set_preempt_need_resched(); \ + } while (0) + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define preempt_disable_rt() preempt_disable() ++# define preempt_enable_rt() preempt_enable() ++# define preempt_disable_nort() barrier() ++# define preempt_enable_nort() barrier() ++# ifdef CONFIG_SMP ++ extern void migrate_disable(void); ++ extern void migrate_enable(void); ++# else /* CONFIG_SMP */ ++# define migrate_disable() barrier() ++# define migrate_enable() barrier() ++# endif /* CONFIG_SMP */ ++#else ++# define preempt_disable_rt() barrier() ++# define preempt_enable_rt() barrier() ++# define preempt_disable_nort() preempt_disable() ++# define preempt_enable_nort() preempt_enable() ++# define migrate_disable() preempt_disable() ++# define migrate_enable() preempt_enable() ++#endif ++ + #ifdef CONFIG_PREEMPT_NOTIFIERS + + struct preempt_notifier; +diff -Nur linux-4.9.28.orig/include/linux/printk.h linux-4.9.28/include/linux/printk.h +--- linux-4.9.28.orig/include/linux/printk.h 2017-05-14 14:00:37.000000000 
+0200 ++++ linux-4.9.28/include/linux/printk.h 2017-05-19 03:37:25.170176339 +0200 +@@ -126,9 +126,11 @@ + #ifdef CONFIG_EARLY_PRINTK + extern asmlinkage __printf(1, 2) + void early_printk(const char *fmt, ...); ++extern void printk_kill(void); + #else + static inline __printf(1, 2) __cold + void early_printk(const char *s, ...) { } ++static inline void printk_kill(void) { } + #endif + + #ifdef CONFIG_PRINTK_NMI +diff -Nur linux-4.9.28.orig/include/linux/radix-tree.h linux-4.9.28/include/linux/radix-tree.h +--- linux-4.9.28.orig/include/linux/radix-tree.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/radix-tree.h 2017-05-19 03:37:25.170176339 +0200 +@@ -292,6 +292,8 @@ + int radix_tree_preload(gfp_t gfp_mask); + int radix_tree_maybe_preload(gfp_t gfp_mask); + int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); ++void radix_tree_preload_end(void); ++ + void radix_tree_init(void); + void *radix_tree_tag_set(struct radix_tree_root *root, + unsigned long index, unsigned int tag); +@@ -314,11 +316,6 @@ + int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); + unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); + +-static inline void radix_tree_preload_end(void) +-{ +- preempt_enable(); +-} +- + /** + * struct radix_tree_iter - radix tree iterator state + * +diff -Nur linux-4.9.28.orig/include/linux/random.h linux-4.9.28/include/linux/random.h +--- linux-4.9.28.orig/include/linux/random.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/random.h 2017-05-19 03:37:25.170176339 +0200 +@@ -31,7 +31,7 @@ + + extern void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value) __latent_entropy; +-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy; ++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy; + + extern void get_random_bytes(void *buf, int nbytes); + extern int add_random_ready_callback(struct random_ready_callback *rdy); +diff -Nur linux-4.9.28.orig/include/linux/rbtree_augmented.h linux-4.9.28/include/linux/rbtree_augmented.h +--- linux-4.9.28.orig/include/linux/rbtree_augmented.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/rbtree_augmented.h 2017-05-19 03:37:25.170176339 +0200 +@@ -26,6 +26,7 @@ + + #include <linux/compiler.h> + #include <linux/rbtree.h> ++#include <linux/rcupdate.h> + + /* + * Please note - only struct rb_augment_callbacks and the prototypes for +diff -Nur linux-4.9.28.orig/include/linux/rbtree.h linux-4.9.28/include/linux/rbtree.h +--- linux-4.9.28.orig/include/linux/rbtree.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/rbtree.h 2017-05-19 03:37:25.170176339 +0200 +@@ -31,7 +31,7 @@ + + #include <linux/kernel.h> + #include <linux/stddef.h> +-#include <linux/rcupdate.h> ++#include <linux/rcu_assign_pointer.h> + + struct rb_node { + unsigned long __rb_parent_color; +diff -Nur linux-4.9.28.orig/include/linux/rcu_assign_pointer.h linux-4.9.28/include/linux/rcu_assign_pointer.h +--- linux-4.9.28.orig/include/linux/rcu_assign_pointer.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/rcu_assign_pointer.h 2017-05-19 03:37:25.170176339 +0200 +@@ -0,0 +1,54 @@ ++#ifndef __LINUX_RCU_ASSIGN_POINTER_H__ ++#define __LINUX_RCU_ASSIGN_POINTER_H__ ++#include <linux/compiler.h> ++#include <asm/barrier.h> ++ ++/** ++ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable ++ * @v: The value to statically initialize with. 
++ */ ++#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) ++ ++/** ++ * rcu_assign_pointer() - assign to RCU-protected pointer ++ * @p: pointer to assign to ++ * @v: value to assign (publish) ++ * ++ * Assigns the specified value to the specified RCU-protected ++ * pointer, ensuring that any concurrent RCU readers will see ++ * any prior initialization. ++ * ++ * Inserts memory barriers on architectures that require them ++ * (which is most of them), and also prevents the compiler from ++ * reordering the code that initializes the structure after the pointer ++ * assignment. More importantly, this call documents which pointers ++ * will be dereferenced by RCU read-side code. ++ * ++ * In some special cases, you may use RCU_INIT_POINTER() instead ++ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due ++ * to the fact that it does not constrain either the CPU or the compiler. ++ * That said, using RCU_INIT_POINTER() when you should have used ++ * rcu_assign_pointer() is a very bad thing that results in ++ * impossible-to-diagnose memory corruption. So please be careful. ++ * See the RCU_INIT_POINTER() comment header for details. ++ * ++ * Note that rcu_assign_pointer() evaluates each of its arguments only ++ * once, appearances notwithstanding. One of the "extra" evaluations ++ * is in typeof() and the other visible only to sparse (__CHECKER__), ++ * neither of which actually execute the argument. As with most cpp ++ * macros, this execute-arguments-only-once property is important, so ++ * please be careful when making changes to rcu_assign_pointer() and the ++ * other macros that it invokes. ++ */ ++#define rcu_assign_pointer(p, v) \ ++({ \ ++ uintptr_t _r_a_p__v = (uintptr_t)(v); \ ++ \ ++ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ ++ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ ++ else \ ++ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ ++ _r_a_p__v; \ ++}) ++ ++#endif +diff -Nur linux-4.9.28.orig/include/linux/rcupdate.h linux-4.9.28/include/linux/rcupdate.h +--- linux-4.9.28.orig/include/linux/rcupdate.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/rcupdate.h 2017-05-19 03:37:25.170176339 +0200 +@@ -46,6 +46,7 @@ + #include <linux/compiler.h> + #include <linux/ktime.h> + #include <linux/irqflags.h> ++#include <linux/rcu_assign_pointer.h> + + #include <asm/barrier.h> + +@@ -178,6 +179,9 @@ + + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define call_rcu_bh call_rcu ++#else + /** + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. +@@ -201,6 +205,7 @@ + */ + void call_rcu_bh(struct rcu_head *head, + rcu_callback_t func); ++#endif + + /** + * call_rcu_sched() - Queue an RCU for invocation after sched grace period. +@@ -301,6 +306,11 @@ + * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
+ */ + #define rcu_preempt_depth() (current->rcu_read_lock_nesting) ++#ifndef CONFIG_PREEMPT_RT_FULL ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++#else ++static inline int sched_rcu_preempt_depth(void) { return 0; } ++#endif + + #else /* #ifdef CONFIG_PREEMPT_RCU */ + +@@ -326,6 +336,8 @@ + return 0; + } + ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++ + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + + /* Internal to kernel */ +@@ -505,7 +517,14 @@ + int debug_lockdep_rcu_enabled(void); + + int rcu_read_lock_held(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int rcu_read_lock_bh_held(void) ++{ ++ return rcu_read_lock_held(); ++} ++#else + int rcu_read_lock_bh_held(void); ++#endif + + /** + * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? +@@ -626,54 +645,6 @@ + }) + + /** +- * RCU_INITIALIZER() - statically initialize an RCU-protected global variable +- * @v: The value to statically initialize with. +- */ +-#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) +- +-/** +- * rcu_assign_pointer() - assign to RCU-protected pointer +- * @p: pointer to assign to +- * @v: value to assign (publish) +- * +- * Assigns the specified value to the specified RCU-protected +- * pointer, ensuring that any concurrent RCU readers will see +- * any prior initialization. +- * +- * Inserts memory barriers on architectures that require them +- * (which is most of them), and also prevents the compiler from +- * reordering the code that initializes the structure after the pointer +- * assignment. More importantly, this call documents which pointers +- * will be dereferenced by RCU read-side code. +- * +- * In some special cases, you may use RCU_INIT_POINTER() instead +- * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due +- * to the fact that it does not constrain either the CPU or the compiler. +- * That said, using RCU_INIT_POINTER() when you should have used +- * rcu_assign_pointer() is a very bad thing that results in +- * impossible-to-diagnose memory corruption. So please be careful. +- * See the RCU_INIT_POINTER() comment header for details. +- * +- * Note that rcu_assign_pointer() evaluates each of its arguments only +- * once, appearances notwithstanding. One of the "extra" evaluations +- * is in typeof() and the other visible only to sparse (__CHECKER__), +- * neither of which actually execute the argument. As with most cpp +- * macros, this execute-arguments-only-once property is important, so +- * please be careful when making changes to rcu_assign_pointer() and the +- * other macros that it invokes. 
+- */ +-#define rcu_assign_pointer(p, v) \ +-({ \ +- uintptr_t _r_a_p__v = (uintptr_t)(v); \ +- \ +- if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ +- WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ +- else \ +- smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ +- _r_a_p__v; \ +-}) +- +-/** + * rcu_access_pointer() - fetch RCU pointer with no dereferencing + * @p: The pointer to read + * +@@ -951,10 +922,14 @@ + static inline void rcu_read_lock_bh(void) + { + local_bh_disable(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_lock(); ++#else + __acquire(RCU_BH); + rcu_lock_acquire(&rcu_bh_lock_map); + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_lock_bh() used illegally while idle"); ++#endif + } + + /* +@@ -964,10 +939,14 @@ + */ + static inline void rcu_read_unlock_bh(void) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_unlock(); ++#else + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_unlock_bh() used illegally while idle"); + rcu_lock_release(&rcu_bh_lock_map); + __release(RCU_BH); ++#endif + local_bh_enable(); + } + +diff -Nur linux-4.9.28.orig/include/linux/rcutree.h linux-4.9.28/include/linux/rcutree.h +--- linux-4.9.28.orig/include/linux/rcutree.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/rcutree.h 2017-05-19 03:37:25.170176339 +0200 +@@ -44,7 +44,11 @@ + rcu_note_context_switch(); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define synchronize_rcu_bh synchronize_rcu ++#else + void synchronize_rcu_bh(void); ++#endif + void synchronize_sched_expedited(void); + void synchronize_rcu_expedited(void); + +@@ -72,7 +76,11 @@ + } + + void rcu_barrier(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define rcu_barrier_bh rcu_barrier ++#else + void rcu_barrier_bh(void); ++#endif + void rcu_barrier_sched(void); + unsigned long get_state_synchronize_rcu(void); + void cond_synchronize_rcu(unsigned long oldstate); +@@ -82,17 +90,14 @@ + extern unsigned long rcutorture_testseq; + extern unsigned long rcutorture_vernum; + unsigned long rcu_batches_started(void); +-unsigned long rcu_batches_started_bh(void); + unsigned long rcu_batches_started_sched(void); + unsigned long rcu_batches_completed(void); +-unsigned long rcu_batches_completed_bh(void); + unsigned long rcu_batches_completed_sched(void); + unsigned long rcu_exp_batches_completed(void); + unsigned long rcu_exp_batches_completed_sched(void); + void show_rcu_gp_kthreads(void); + + void rcu_force_quiescent_state(void); +-void rcu_bh_force_quiescent_state(void); + void rcu_sched_force_quiescent_state(void); + + void rcu_idle_enter(void); +@@ -109,6 +114,16 @@ + + bool rcu_is_watching(void); + ++#ifndef CONFIG_PREEMPT_RT_FULL ++void rcu_bh_force_quiescent_state(void); ++unsigned long rcu_batches_started_bh(void); ++unsigned long rcu_batches_completed_bh(void); ++#else ++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state ++# define rcu_batches_completed_bh rcu_batches_completed ++# define rcu_batches_started_bh rcu_batches_completed ++#endif ++ + void rcu_all_qs(void); + + /* RCUtree hotplug events */ +diff -Nur linux-4.9.28.orig/include/linux/rtmutex.h linux-4.9.28/include/linux/rtmutex.h +--- linux-4.9.28.orig/include/linux/rtmutex.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/rtmutex.h 2017-05-19 03:37:25.170176339 +0200 +@@ -13,11 +13,15 @@ + #define __LINUX_RT_MUTEX_H + + #include <linux/linkage.h> ++#include <linux/spinlock_types_raw.h> + #include <linux/rbtree.h> +-#include <linux/spinlock_types.h> + + extern int max_lock_depth; /* for sysctl */ + ++#ifdef 
CONFIG_DEBUG_MUTEXES ++#include <linux/debug_locks.h> ++#endif ++ + /** + * The rt_mutex structure + * +@@ -31,8 +35,8 @@ + struct rb_root waiters; + struct rb_node *waiters_leftmost; + struct task_struct *owner; +-#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; ++#ifdef CONFIG_DEBUG_RT_MUTEXES + const char *name, *file; + int line; + void *magic; +@@ -55,22 +59,33 @@ + # define rt_mutex_debug_check_no_locks_held(task) do { } while (0) + #endif + ++# define rt_mutex_init(mutex) \ ++ do { \ ++ raw_spin_lock_init(&(mutex)->wait_lock); \ ++ __rt_mutex_init(mutex, #mutex); \ ++ } while (0) ++ + #ifdef CONFIG_DEBUG_RT_MUTEXES + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) + extern void rt_mutex_debug_task_free(struct task_struct *tsk); + #else + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) + # define rt_mutex_debug_task_free(t) do { } while (0) + #endif + +-#define __RT_MUTEX_INITIALIZER(mutexname) \ +- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ ++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .waiters = RB_ROOT \ + , .owner = NULL \ +- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} ++ __DEBUG_RT_MUTEX_INITIALIZER(mutexname) ++ ++#define __RT_MUTEX_INITIALIZER(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } ++ ++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ , .save_state = 1 } + + #define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) +@@ -90,7 +105,9 @@ + extern void rt_mutex_destroy(struct rt_mutex *lock); + + extern void rt_mutex_lock(struct rt_mutex *lock); ++extern int rt_mutex_lock_state(struct rt_mutex *lock, int state); + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); ++extern int rt_mutex_lock_killable(struct rt_mutex *lock); + extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout); + +diff -Nur linux-4.9.28.orig/include/linux/rwlock_rt.h linux-4.9.28/include/linux/rwlock_rt.h +--- linux-4.9.28.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/rwlock_rt.h 2017-05-19 03:37:25.170176339 +0200 +@@ -0,0 +1,99 @@ ++#ifndef __LINUX_RWLOCK_RT_H ++#define __LINUX_RWLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. 
Use spinlock.h ++#endif ++ ++#define rwlock_init(rwl) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(rwl)->lock); \ ++ __rt_rwlock_init(rwl, #rwl, &__key); \ ++} while (0) ++ ++extern void __lockfunc rt_write_lock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_lock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); ++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); ++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); ++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); ++ ++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) ++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) ++ ++#define write_trylock_irqsave(lock, flags) \ ++ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) ++ ++#define read_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = rt_read_lock_irqsave(lock); \ ++ } while (0) ++ ++#define write_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = rt_write_lock_irqsave(lock); \ ++ } while (0) ++ ++#define read_lock(lock) rt_read_lock(lock) ++ ++#define read_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_read_lock(lock); \ ++ } while (0) ++ ++#define read_lock_irq(lock) read_lock(lock) ++ ++#define write_lock(lock) rt_write_lock(lock) ++ ++#define write_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_write_lock(lock); \ ++ } while (0) ++ ++#define write_lock_irq(lock) write_lock(lock) ++ ++#define read_unlock(lock) rt_read_unlock(lock) ++ ++#define read_unlock_bh(lock) \ ++ do { \ ++ rt_read_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define read_unlock_irq(lock) read_unlock(lock) ++ ++#define write_unlock(lock) rt_write_unlock(lock) ++ ++#define write_unlock_bh(lock) \ ++ do { \ ++ rt_write_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define write_unlock_irq(lock) write_unlock(lock) ++ ++#define read_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_read_unlock(lock); \ ++ } while (0) ++ ++#define write_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_write_unlock(lock); \ ++ } while (0) ++ ++#endif +diff -Nur linux-4.9.28.orig/include/linux/rwlock_types.h linux-4.9.28/include/linux/rwlock_types.h +--- linux-4.9.28.orig/include/linux/rwlock_types.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/rwlock_types.h 2017-05-19 03:37:25.170176339 +0200 +@@ -1,6 +1,10 @@ + #ifndef __LINUX_RWLOCK_TYPES_H + #define __LINUX_RWLOCK_TYPES_H + ++#if !defined(__LINUX_SPINLOCK_TYPES_H) ++# error "Do not include directly, include spinlock_types.h" ++#endif ++ + /* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers +diff -Nur linux-4.9.28.orig/include/linux/rwlock_types_rt.h linux-4.9.28/include/linux/rwlock_types_rt.h +--- linux-4.9.28.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/rwlock_types_rt.h 2017-05-19 03:37:25.170176339 +0200 +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_RWLOCK_TYPES_RT_H ++#define 
__LINUX_RWLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. Include spinlock_types.h instead" ++#endif ++ ++/* ++ * rwlocks - rtmutex which allows single reader recursion ++ */ ++typedef struct { ++ struct rt_mutex lock; ++ int read_depth; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} rwlock_t; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define RW_DEP_MAP_INIT(lockname) ++#endif ++ ++#define __RW_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ ++ RW_DEP_MAP_INIT(name) } ++ ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name = __RW_LOCK_UNLOCKED(name) ++ ++#endif +diff -Nur linux-4.9.28.orig/include/linux/rwsem.h linux-4.9.28/include/linux/rwsem.h +--- linux-4.9.28.orig/include/linux/rwsem.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/rwsem.h 2017-05-19 03:37:25.174176493 +0200 +@@ -19,6 +19,10 @@ + #include <linux/osq_lock.h> + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#include <linux/rwsem_rt.h> ++#else /* PREEMPT_RT_FULL */ ++ + struct rw_semaphore; + + #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +@@ -106,6 +110,13 @@ + return !list_empty(&sem->wait_list); + } + ++#endif /* !PREEMPT_RT_FULL */ ++ ++/* ++ * The functions below are the same for all rwsem implementations including ++ * the RT specific variant. ++ */ ++ + /* + * lock for reading + */ +diff -Nur linux-4.9.28.orig/include/linux/rwsem_rt.h linux-4.9.28/include/linux/rwsem_rt.h +--- linux-4.9.28.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/rwsem_rt.h 2017-05-19 03:37:25.174176493 +0200 +@@ -0,0 +1,67 @@ ++#ifndef _LINUX_RWSEM_RT_H ++#define _LINUX_RWSEM_RT_H ++ ++#ifndef _LINUX_RWSEM_H ++#error "Include rwsem.h" ++#endif ++ ++#include <linux/rtmutex.h> ++#include <linux/swait.h> ++ ++#define READER_BIAS (1U << 31) ++#define WRITER_BIAS (1U << 30) ++ ++struct rw_semaphore { ++ atomic_t readers; ++ struct rt_mutex rtmutex; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __RWSEM_INITIALIZER(name) \ ++{ \ ++ .readers = ATOMIC_INIT(READER_BIAS), \ ++ .rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex), \ ++ RW_DEP_MAP_INIT(name) \ ++} ++ ++#define DECLARE_RWSEM(lockname) \ ++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) ++ ++extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name, ++ struct lock_class_key *key); ++ ++#define __init_rwsem(sem, name, key) \ ++do { \ ++ rt_mutex_init(&(sem)->rtmutex); \ ++ __rwsem_init((sem), (name), (key)); \ ++} while (0) ++ ++#define init_rwsem(sem) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __init_rwsem((sem), #sem, &__key); \ ++} while (0) ++ ++static inline int rwsem_is_locked(struct rw_semaphore *sem) ++{ ++ return atomic_read(&sem->readers) != READER_BIAS; ++} ++ ++static inline int rwsem_is_contended(struct rw_semaphore *sem) ++{ ++ return atomic_read(&sem->readers) > 0; ++} ++ ++extern void __down_read(struct rw_semaphore *sem); ++extern int __down_read_trylock(struct rw_semaphore *sem); ++extern void __down_write(struct rw_semaphore *sem); ++extern int __must_check __down_write_killable(struct rw_semaphore *sem); ++extern int __down_write_trylock(struct rw_semaphore *sem); ++extern void __up_read(struct rw_semaphore *sem); ++extern void __up_write(struct rw_semaphore *sem); ++extern void __downgrade_write(struct rw_semaphore *sem); ++ 
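[Editor's note] The rwsem_rt.h replacement above (closed by the #endif just below) swaps the counting-semaphore implementation for an rtmutex plus an atomic reader count that sits at READER_BIAS while the semaphore is unlocked (see rwsem_is_locked() above). The public entry points declared in the shared tail of rwsem.h are unchanged, so existing callers compile as-is. A short usage sketch; hypothetical_sem is a made-up name:

#include <linux/rwsem.h>

static DECLARE_RWSEM(hypothetical_sem);

static void reader(void)
{
	down_read(&hypothetical_sem);	/* may sleep on RT and non-RT */
	/* ... read shared state ... */
	up_read(&hypothetical_sem);
}

static void writer(void)
{
	down_write(&hypothetical_sem);	/* exclusive; backed by an rtmutex
					   on RT, so priority inheritance
					   applies to the writer */
	/* ... modify shared state ... */
	up_write(&hypothetical_sem);
}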
++#endif +diff -Nur linux-4.9.28.orig/include/linux/sched/rt.h linux-4.9.28/include/linux/sched/rt.h +--- linux-4.9.28.orig/include/linux/sched/rt.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/sched/rt.h 2017-05-19 03:37:25.174176493 +0200 +@@ -16,27 +16,20 @@ + } + + #ifdef CONFIG_RT_MUTEXES +-extern int rt_mutex_getprio(struct task_struct *p); +-extern void rt_mutex_setprio(struct task_struct *p, int prio); +-extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio); +-extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task); ++/* ++ * Must hold either p->pi_lock or task_rq(p)->lock. ++ */ ++static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p) ++{ ++ return p->pi_top_task; ++} ++extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task); + extern void rt_mutex_adjust_pi(struct task_struct *p); + static inline bool tsk_is_pi_blocked(struct task_struct *tsk) + { + return tsk->pi_blocked_on != NULL; + } + #else +-static inline int rt_mutex_getprio(struct task_struct *p) +-{ +- return p->normal_prio; +-} +- +-static inline int rt_mutex_get_effective_prio(struct task_struct *task, +- int newprio) +-{ +- return newprio; +-} +- + static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) + { + return NULL; +diff -Nur linux-4.9.28.orig/include/linux/sched.h linux-4.9.28/include/linux/sched.h +--- linux-4.9.28.orig/include/linux/sched.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/sched.h 2017-05-19 03:37:25.174176493 +0200 +@@ -26,6 +26,7 @@ + #include <linux/nodemask.h> + #include <linux/mm_types.h> + #include <linux/preempt.h> ++#include <asm/kmap_types.h> + + #include <asm/page.h> + #include <asm/ptrace.h> +@@ -243,10 +244,7 @@ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) + +-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) + #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) +-#define task_is_stopped_or_traced(task) \ +- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) + #define task_contributes_to_load(task) \ + ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0 && \ +@@ -312,6 +310,11 @@ + + #endif + ++#define __set_current_state_no_track(state_value) \ ++ do { current->state = (state_value); } while (0) ++#define set_current_state_no_track(state_value) \ ++ set_mb(current->state, (state_value)) ++ + /* Task command name length */ + #define TASK_COMM_LEN 16 + +@@ -1013,8 +1016,18 @@ + struct wake_q_head name = { WAKE_Q_TAIL, &name.first } + + extern void wake_q_add(struct wake_q_head *head, +- struct task_struct *task); +-extern void wake_up_q(struct wake_q_head *head); ++ struct task_struct *task); ++extern void __wake_up_q(struct wake_q_head *head, bool sleeper); ++ ++static inline void wake_up_q(struct wake_q_head *head) ++{ ++ __wake_up_q(head, false); ++} ++ ++static inline void wake_up_q_sleeper(struct wake_q_head *head) ++{ ++ __wake_up_q(head, true); ++} + + /* + * sched-domains (multiprocessor balancing) declarations: +@@ -1481,6 +1494,7 @@ + struct thread_info thread_info; + #endif + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ ++ volatile long saved_state; /* saved state for "spinlock sleepers" */ + void *stack; + atomic_t usage; + unsigned int flags; /* per process flags, defined below */ +@@ -1520,6 +1534,12 @@ + #endif + + unsigned int policy; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int 
migrate_disable; ++# ifdef CONFIG_SCHED_DEBUG ++ int migrate_disable_atomic; ++# endif ++#endif + int nr_cpus_allowed; + cpumask_t cpus_allowed; + +@@ -1658,6 +1678,9 @@ + + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *posix_timer_list; ++#endif + + /* process credentials */ + const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ +@@ -1689,10 +1712,15 @@ + /* signal handlers */ + struct signal_struct *signal; + struct sighand_struct *sighand; ++ struct sigqueue *sigqueue_cache; + + sigset_t blocked, real_blocked; + sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ + struct sigpending pending; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* TODO: move me into ->restart_block ? */ ++ struct siginfo forced_info; ++#endif + + unsigned long sas_ss_sp; + size_t sas_ss_size; +@@ -1723,6 +1751,8 @@ + /* PI waiters blocked on a rt_mutex held by this task */ + struct rb_root pi_waiters; + struct rb_node *pi_waiters_leftmost; ++ /* Updated under owner's pi_lock and rq lock */ ++ struct task_struct *pi_top_task; + /* Deadlock detection and priority inheritance handling */ + struct rt_mutex_waiter *pi_blocked_on; + #endif +@@ -1921,6 +1951,12 @@ + /* bitmask and counter of trace recursion */ + unsigned long trace_recursion; + #endif /* CONFIG_TRACING */ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ u64 preempt_timestamp_hist; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ long timer_offset; ++#endif ++#endif + #ifdef CONFIG_KCOV + /* Coverage collection mode enabled for this task (0 if disabled). */ + enum kcov_mode kcov_mode; +@@ -1946,9 +1982,23 @@ + unsigned int sequential_io; + unsigned int sequential_io_avg; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head put_rcu; ++ int softirq_nestcnt; ++ unsigned int softirqs_raised; ++#endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 ++ int kmap_idx; ++ pte_t kmap_pte[KM_TYPE_NR]; ++# endif ++#endif + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + unsigned long task_state_change; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int xmit_recursion; ++#endif + int pagefault_disabled; + #ifdef CONFIG_MMU + struct task_struct *oom_reaper_list; +@@ -1988,14 +2038,6 @@ + } + #endif + +-/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ +-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) +- +-static inline int tsk_nr_cpus_allowed(struct task_struct *p) +-{ +- return p->nr_cpus_allowed; +-} +- + #define TNF_MIGRATED 0x01 + #define TNF_NO_GROUP 0x02 + #define TNF_SHARED 0x04 +@@ -2211,6 +2253,15 @@ + extern void free_task(struct task_struct *tsk); + #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __put_task_struct_cb(struct rcu_head *rhp); ++ ++static inline void put_task_struct(struct task_struct *t) ++{ ++ if (atomic_dec_and_test(&t->usage)) ++ call_rcu(&t->put_rcu, __put_task_struct_cb); ++} ++#else + extern void __put_task_struct(struct task_struct *t); + + static inline void put_task_struct(struct task_struct *t) +@@ -2218,6 +2269,7 @@ + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); + } ++#endif + + struct task_struct *task_rcu_dereference(struct task_struct **ptask); + struct task_struct *try_get_task_struct(struct task_struct **ptask); +@@ -2259,6 +2311,7 @@ + /* + * Per process flags + */ ++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ + #define PF_EXITING 0x00000004 /* getting shut down */ + #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ + #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +@@ -2427,6 +2480,10 @@ + + extern int set_cpus_allowed_ptr(struct task_struct *p, + const struct cpumask *new_mask); ++int migrate_me(void); ++void tell_sched_cpu_down_begin(int cpu); ++void tell_sched_cpu_down_done(int cpu); ++ + #else + static inline void do_set_cpus_allowed(struct task_struct *p, + const struct cpumask *new_mask) +@@ -2439,6 +2496,9 @@ + return -EINVAL; + return 0; + } ++static inline int migrate_me(void) { return 0; } ++static inline void tell_sched_cpu_down_begin(int cpu) { } ++static inline void tell_sched_cpu_down_done(int cpu) { } + #endif + + #ifdef CONFIG_NO_HZ_COMMON +@@ -2677,6 +2737,7 @@ + + extern int wake_up_state(struct task_struct *tsk, unsigned int state); + extern int wake_up_process(struct task_struct *tsk); ++extern int wake_up_lock_sleeper(struct task_struct * tsk); + extern void wake_up_new_task(struct task_struct *tsk); + #ifdef CONFIG_SMP + extern void kick_process(struct task_struct *tsk); +@@ -2885,6 +2946,17 @@ + __mmdrop(mm); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __mmdrop_delayed(struct rcu_head *rhp); ++static inline void mmdrop_delayed(struct mm_struct *mm) ++{ ++ if (atomic_dec_and_test(&mm->mm_count)) ++ call_rcu(&mm->delayed_drop, __mmdrop_delayed); ++} ++#else ++# define mmdrop_delayed(mm) mmdrop(mm) ++#endif ++ + static inline void mmdrop_async_fn(struct work_struct *work) + { + struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); +@@ -3277,6 +3349,43 @@ + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); ++} ++ ++static inline int need_resched_lazy(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++ ++#else ++static inline void 
clear_tsk_need_resched_lazy(struct task_struct *tsk) { } ++static inline int need_resched_lazy(void) { return 0; } ++ ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++ ++#endif ++ + static inline int restart_syscall(void) + { + set_tsk_thread_flag(current, TIF_SIGPENDING); +@@ -3308,6 +3417,51 @@ + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); + } + ++static inline bool __task_is_stopped_or_traced(struct task_struct *task) ++{ ++ if (task->state & (__TASK_STOPPED | __TASK_TRACED)) ++ return true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) ++ return true; ++#endif ++ return false; ++} ++ ++static inline bool task_is_stopped_or_traced(struct task_struct *task) ++{ ++ bool traced_stopped; ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&task->pi_lock, flags); ++ traced_stopped = __task_is_stopped_or_traced(task); ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++#else ++ traced_stopped = __task_is_stopped_or_traced(task); ++#endif ++ return traced_stopped; ++} ++ ++static inline bool task_is_traced(struct task_struct *task) ++{ ++ bool traced = false; ++ ++ if (task->state & __TASK_TRACED) ++ return true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* in case the task is sleeping on tasklist_lock */ ++ raw_spin_lock_irq(&task->pi_lock); ++ if (task->state & __TASK_TRACED) ++ traced = true; ++ else if (task->saved_state & __TASK_TRACED) ++ traced = true; ++ raw_spin_unlock_irq(&task->pi_lock); ++#endif ++ return traced; ++} ++ + /* + * cond_resched() and cond_resched_lock(): latency reduction via + * explicit rescheduling in places that are safe. The return +@@ -3333,12 +3487,16 @@ + __cond_resched_lock(lock); \ + }) + ++#ifndef CONFIG_PREEMPT_RT_FULL + extern int __cond_resched_softirq(void); + + #define cond_resched_softirq() ({ \ + ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ + }) ++#else ++# define cond_resched_softirq() cond_resched() ++#endif + + static inline void cond_resched_rcu(void) + { +@@ -3513,6 +3671,31 @@ + + #endif /* CONFIG_SMP */ + ++static inline int __migrate_disabled(struct task_struct *p) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ return p->migrate_disable; ++#else ++ return 0; ++#endif ++} ++ ++/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ ++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) ++{ ++ if (__migrate_disabled(p)) ++ return cpumask_of(task_cpu(p)); ++ ++ return &p->cpus_allowed; ++} ++ ++static inline int tsk_nr_cpus_allowed(struct task_struct *p) ++{ ++ if (__migrate_disabled(p)) ++ return 1; ++ return p->nr_cpus_allowed; ++} ++ + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); + extern long sched_getaffinity(pid_t pid, struct cpumask *mask); + +diff -Nur linux-4.9.28.orig/include/linux/seqlock.h linux-4.9.28/include/linux/seqlock.h +--- linux-4.9.28.orig/include/linux/seqlock.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/seqlock.h 2017-05-19 03:37:25.174176493 +0200 +@@ -220,20 +220,30 @@ + return __read_seqcount_retry(s, start); + } + +- +- +-static inline void raw_write_seqcount_begin(seqcount_t *s) ++static inline void __raw_write_seqcount_begin(seqcount_t *s) + { + s->sequence++; + smp_wmb(); + } + +-static inline void raw_write_seqcount_end(seqcount_t *s) ++static inline void raw_write_seqcount_begin(seqcount_t *s) ++{ ++ preempt_disable_rt(); ++ __raw_write_seqcount_begin(s); ++} ++ ++static inline void __raw_write_seqcount_end(seqcount_t *s) + { + smp_wmb(); + s->sequence++; + } + ++static inline void raw_write_seqcount_end(seqcount_t *s) ++{ ++ __raw_write_seqcount_end(s); ++ preempt_enable_rt(); ++} ++ + /** + * raw_write_seqcount_barrier - do a seq write barrier + * @s: pointer to seqcount_t +@@ -428,10 +438,32 @@ + /* + * Read side functions for starting and finalizing a read side section. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + static inline unsigned read_seqbegin(const seqlock_t *sl) + { + return read_seqcount_begin(&sl->seqcount); + } ++#else ++/* ++ * Starvation safe read side for RT ++ */ ++static inline unsigned read_seqbegin(seqlock_t *sl) ++{ ++ unsigned ret; ++ ++repeat: ++ ret = ACCESS_ONCE(sl->seqcount.sequence); ++ if (unlikely(ret & 1)) { ++ /* ++ * Take the lock and let the writer proceed (i.e. possibly ++ * boost it), otherwise we could loop here forever. 
++ */ ++ spin_unlock_wait(&sl->lock); ++ goto repeat; ++ } ++ return ret; ++} ++#endif + + static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) + { +@@ -446,36 +478,45 @@ + static inline void write_seqlock(seqlock_t *sl) + { + spin_lock(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); ++} ++ ++static inline int try_write_seqlock(seqlock_t *sl) ++{ ++ if (spin_trylock(&sl->lock)) { ++ __raw_write_seqcount_begin(&sl->seqcount); ++ return 1; ++ } ++ return 0; + } + + static inline void write_sequnlock(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock(&sl->lock); + } + + static inline void write_seqlock_bh(seqlock_t *sl) + { + spin_lock_bh(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_bh(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_bh(&sl->lock); + } + + static inline void write_seqlock_irq(seqlock_t *sl) + { + spin_lock_irq(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_irq(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_irq(&sl->lock); + } + +@@ -484,7 +525,7 @@ + unsigned long flags; + + spin_lock_irqsave(&sl->lock, flags); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + return flags; + } + +@@ -494,7 +535,7 @@ + static inline void + write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_irqrestore(&sl->lock, flags); + } + +diff -Nur linux-4.9.28.orig/include/linux/signal.h linux-4.9.28/include/linux/signal.h +--- linux-4.9.28.orig/include/linux/signal.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/signal.h 2017-05-19 03:37:25.174176493 +0200 +@@ -233,6 +233,7 @@ + } + + extern void flush_sigqueue(struct sigpending *queue); ++extern void flush_task_sigqueue(struct task_struct *tsk); + + /* Test if 'sig' is valid signal. 
Use this instead of testing _NSIG directly */ + static inline int valid_signal(unsigned long sig) +diff -Nur linux-4.9.28.orig/include/linux/skbuff.h linux-4.9.28/include/linux/skbuff.h +--- linux-4.9.28.orig/include/linux/skbuff.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/skbuff.h 2017-05-19 03:37:25.174176493 +0200 +@@ -284,6 +284,7 @@ + + __u32 qlen; + spinlock_t lock; ++ raw_spinlock_t raw_lock; + }; + + struct sk_buff; +@@ -1573,6 +1574,12 @@ + __skb_queue_head_init(list); + } + ++static inline void skb_queue_head_init_raw(struct sk_buff_head *list) ++{ ++ raw_spin_lock_init(&list->raw_lock); ++ __skb_queue_head_init(list); ++} ++ + static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) + { +diff -Nur linux-4.9.28.orig/include/linux/smp.h linux-4.9.28/include/linux/smp.h +--- linux-4.9.28.orig/include/linux/smp.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/smp.h 2017-05-19 03:37:25.174176493 +0200 +@@ -120,6 +120,13 @@ + extern void __init setup_nr_cpu_ids(void); + extern void __init smp_init(void); + ++extern int __boot_cpu_id; ++ ++static inline int get_boot_cpu_id(void) ++{ ++ return __boot_cpu_id; ++} ++ + #else /* !SMP */ + + static inline void smp_send_stop(void) { } +@@ -158,6 +165,11 @@ + static inline void smp_init(void) { } + #endif + ++static inline int get_boot_cpu_id(void) ++{ ++ return 0; ++} ++ + #endif /* !SMP */ + + /* +@@ -185,6 +197,9 @@ + #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) + #define put_cpu() preempt_enable() + ++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) ++#define put_cpu_light() migrate_enable() ++ + /* + * Callback to arch code if there's nosmp or maxcpus=0 on the + * boot command line: +diff -Nur linux-4.9.28.orig/include/linux/spinlock_api_smp.h linux-4.9.28/include/linux/spinlock_api_smp.h +--- linux-4.9.28.orig/include/linux/spinlock_api_smp.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/spinlock_api_smp.h 2017-05-19 03:37:25.174176493 +0200 +@@ -189,6 +189,8 @@ + return 0; + } + +-#include <linux/rwlock_api_smp.h> ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include <linux/rwlock_api_smp.h> ++#endif + + #endif /* __LINUX_SPINLOCK_API_SMP_H */ +diff -Nur linux-4.9.28.orig/include/linux/spinlock.h linux-4.9.28/include/linux/spinlock.h +--- linux-4.9.28.orig/include/linux/spinlock.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/spinlock.h 2017-05-19 03:37:25.174176493 +0200 +@@ -271,7 +271,11 @@ + #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) + + /* Include rwlock functions */ +-#include <linux/rwlock.h> ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include <linux/rwlock_rt.h> ++#else ++# include <linux/rwlock.h> ++#endif + + /* + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: +@@ -282,6 +286,10 @@ + # include <linux/spinlock_api_up.h> + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include <linux/spinlock_rt.h> ++#else /* PREEMPT_RT_FULL */ ++ + /* + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n + */ +@@ -416,4 +424,6 @@ + #define atomic_dec_and_lock(atomic, lock) \ + __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) + ++#endif /* !PREEMPT_RT_FULL */ ++ + #endif /* __LINUX_SPINLOCK_H */ +diff -Nur linux-4.9.28.orig/include/linux/spinlock_rt.h linux-4.9.28/include/linux/spinlock_rt.h +--- linux-4.9.28.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/spinlock_rt.h 2017-05-19 
03:37:25.174176493 +0200 +@@ -0,0 +1,162 @@ ++#ifndef __LINUX_SPINLOCK_RT_H ++#define __LINUX_SPINLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. Use spinlock.h ++#endif ++ ++#include <linux/bug.h> ++ ++extern void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); ++ ++#define spin_lock_init(slock) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(slock)->lock); \ ++ __rt_spin_lock_init(slock, #slock, &__key); \ ++} while (0) ++ ++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock); ++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock); ++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock); ++ ++extern void __lockfunc rt_spin_lock(spinlock_t *lock); ++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); ++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); ++extern void __lockfunc rt_spin_unlock(spinlock_t *lock); ++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); ++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock(spinlock_t *lock); ++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); ++ ++/* ++ * lockdep-less calls, for derived types like rwlock: ++ * (for trylock they can use rt_mutex_trylock() directly. ++ */ ++extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock); ++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); ++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); ++ ++#define spin_lock(lock) rt_spin_lock(lock) ++ ++#define spin_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) ++ ++#define spin_lock_irq(lock) spin_lock(lock) ++ ++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) ++ ++#define spin_trylock(lock) \ ++({ \ ++ int __locked; \ ++ __locked = spin_do_trylock(lock); \ ++ __locked; \ ++}) ++ ++#ifdef CONFIG_LOCKDEP ++# define spin_lock_nested(lock, subclass) \ ++ do { \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++ ++#define spin_lock_bh_nested(lock, subclass) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++#else ++# define spin_lock_nested(lock, subclass) spin_lock(lock) ++# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++#endif ++ ++#define spin_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++ ++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) ++{ ++ unsigned long flags = 0; ++#ifdef CONFIG_TRACE_IRQFLAGS ++ flags = rt_spin_lock_trace_flags(lock); ++#else ++ spin_lock(lock); /* lock_local */ ++#endif ++ return flags; ++} ++ ++/* FIXME: we need rt_spin_lock_nest_lock */ ++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) ++ ++#define spin_unlock(lock) rt_spin_unlock(lock) ++ ++#define spin_unlock_bh(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define spin_unlock_irq(lock) 
spin_unlock(lock) ++ ++#define spin_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ spin_unlock(lock); \ ++ } while (0) ++ ++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) ++#define spin_trylock_irq(lock) spin_trylock(lock) ++ ++#define spin_trylock_irqsave(lock, flags) \ ++ rt_spin_trylock_irqsave(lock, &(flags)) ++ ++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) ++ ++#ifdef CONFIG_GENERIC_LOCKBREAK ++# define spin_is_contended(lock) ((lock)->break_lock) ++#else ++# define spin_is_contended(lock) (((void)(lock), 0)) ++#endif ++ ++static inline int spin_can_lock(spinlock_t *lock) ++{ ++ return !rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline int spin_is_locked(spinlock_t *lock) ++{ ++ return rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline void assert_spin_locked(spinlock_t *lock) ++{ ++ BUG_ON(!spin_is_locked(lock)); ++} ++ ++#define atomic_dec_and_lock(atomic, lock) \ ++ atomic_dec_and_spin_lock(atomic, lock) ++ ++#endif +diff -Nur linux-4.9.28.orig/include/linux/spinlock_types.h linux-4.9.28/include/linux/spinlock_types.h +--- linux-4.9.28.orig/include/linux/spinlock_types.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/spinlock_types.h 2017-05-19 03:37:25.174176493 +0200 +@@ -9,80 +9,15 @@ + * Released under the General Public License (GPL). + */ + +-#if defined(CONFIG_SMP) +-# include <asm/spinlock_types.h> +-#else +-# include <linux/spinlock_types_up.h> +-#endif +- +-#include <linux/lockdep.h> +- +-typedef struct raw_spinlock { +- arch_spinlock_t raw_lock; +-#ifdef CONFIG_GENERIC_LOCKBREAK +- unsigned int break_lock; +-#endif +-#ifdef CONFIG_DEBUG_SPINLOCK +- unsigned int magic, owner_cpu; +- void *owner; +-#endif +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +- struct lockdep_map dep_map; +-#endif +-} raw_spinlock_t; +- +-#define SPINLOCK_MAGIC 0xdead4ead +- +-#define SPINLOCK_OWNER_INIT ((void *)-1L) +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +-#else +-# define SPIN_DEP_MAP_INIT(lockname) +-#endif ++#include <linux/spinlock_types_raw.h> + +-#ifdef CONFIG_DEBUG_SPINLOCK +-# define SPIN_DEBUG_INIT(lockname) \ +- .magic = SPINLOCK_MAGIC, \ +- .owner_cpu = -1, \ +- .owner = SPINLOCK_OWNER_INIT, ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include <linux/spinlock_types_nort.h> ++# include <linux/rwlock_types.h> + #else +-# define SPIN_DEBUG_INIT(lockname) ++# include <linux/rtmutex.h> ++# include <linux/spinlock_types_rt.h> ++# include <linux/rwlock_types_rt.h> + #endif + +-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ +- { \ +- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ +- SPIN_DEBUG_INIT(lockname) \ +- SPIN_DEP_MAP_INIT(lockname) } +- +-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ +- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) +- +-typedef struct spinlock { +- union { +- struct raw_spinlock rlock; +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) +- struct { +- u8 __padding[LOCK_PADSIZE]; +- struct lockdep_map dep_map; +- }; +-#endif +- }; +-} spinlock_t; +- +-#define __SPIN_LOCK_INITIALIZER(lockname) \ +- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } +- +-#define __SPIN_LOCK_UNLOCKED(lockname) \ +- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +- +-#include <linux/rwlock_types.h> +- + 
#endif /* __LINUX_SPINLOCK_TYPES_H */ +diff -Nur linux-4.9.28.orig/include/linux/spinlock_types_nort.h linux-4.9.28/include/linux/spinlock_types_nort.h +--- linux-4.9.28.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/spinlock_types_nort.h 2017-05-19 03:37:25.174176493 +0200 +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H ++#define __LINUX_SPINLOCK_TYPES_NORT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. Include spinlock_types.h instead" ++#endif ++ ++/* ++ * The non RT version maps spinlocks to raw_spinlocks ++ */ ++typedef struct spinlock { ++ union { ++ struct raw_spinlock rlock; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) ++ struct { ++ u8 __padding[LOCK_PADSIZE]; ++ struct lockdep_map dep_map; ++ }; ++#endif ++ }; ++} spinlock_t; ++ ++#define __SPIN_LOCK_INITIALIZER(lockname) \ ++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } ++ ++#define __SPIN_LOCK_UNLOCKED(lockname) \ ++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) ++ ++#endif +diff -Nur linux-4.9.28.orig/include/linux/spinlock_types_raw.h linux-4.9.28/include/linux/spinlock_types_raw.h +--- linux-4.9.28.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/spinlock_types_raw.h 2017-05-19 03:37:25.174176493 +0200 +@@ -0,0 +1,56 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H ++#define __LINUX_SPINLOCK_TYPES_RAW_H ++ ++#if defined(CONFIG_SMP) ++# include <asm/spinlock_types.h> ++#else ++# include <linux/spinlock_types_up.h> ++#endif ++ ++#include <linux/lockdep.h> ++ ++typedef struct raw_spinlock { ++ arch_spinlock_t raw_lock; ++#ifdef CONFIG_GENERIC_LOCKBREAK ++ unsigned int break_lock; ++#endif ++#ifdef CONFIG_DEBUG_SPINLOCK ++ unsigned int magic, owner_cpu; ++ void *owner; ++#endif ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} raw_spinlock_t; ++ ++#define SPINLOCK_MAGIC 0xdead4ead ++ ++#define SPINLOCK_OWNER_INIT ((void *)-1L) ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define SPIN_DEP_MAP_INIT(lockname) ++#endif ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define SPIN_DEBUG_INIT(lockname) \ ++ .magic = SPINLOCK_MAGIC, \ ++ .owner_cpu = -1, \ ++ .owner = SPINLOCK_OWNER_INIT, ++#else ++# define SPIN_DEBUG_INIT(lockname) ++#endif ++ ++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ ++ { \ ++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ ++ SPIN_DEBUG_INIT(lockname) \ ++ SPIN_DEP_MAP_INIT(lockname) } ++ ++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ ++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) ++ ++#endif +diff -Nur linux-4.9.28.orig/include/linux/spinlock_types_rt.h linux-4.9.28/include/linux/spinlock_types_rt.h +--- linux-4.9.28.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/spinlock_types_rt.h 2017-05-19 03:37:25.174176493 +0200 +@@ -0,0 +1,48 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RT_H ++#define __LINUX_SPINLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. 
Include spinlock_types.h instead" ++#endif ++ ++#include <linux/cache.h> ++ ++/* ++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: ++ */ ++typedef struct spinlock { ++ struct rt_mutex lock; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} spinlock_t; ++ ++#ifdef CONFIG_DEBUG_RT_MUTEXES ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ .file = __FILE__, \ ++ .line = __LINE__ , \ ++ } ++#else ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ } ++#endif ++ ++/* ++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) ++*/ ++ ++#define __SPIN_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ ++ SPIN_DEP_MAP_INIT(name) } ++ ++#define DEFINE_SPINLOCK(name) \ ++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) ++ ++#endif +diff -Nur linux-4.9.28.orig/include/linux/srcu.h linux-4.9.28/include/linux/srcu.h +--- linux-4.9.28.orig/include/linux/srcu.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/srcu.h 2017-05-19 03:37:25.174176493 +0200 +@@ -84,10 +84,10 @@ + + void process_srcu(struct work_struct *work); + +-#define __SRCU_STRUCT_INIT(name) \ ++#define __SRCU_STRUCT_INIT(name, pcpu_name) \ + { \ + .completed = -300, \ +- .per_cpu_ref = &name##_srcu_array, \ ++ .per_cpu_ref = &pcpu_name, \ + .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ + .running = false, \ + .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ +@@ -119,7 +119,7 @@ + */ + #define __DEFINE_SRCU(name, is_static) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ +- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) ++ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array) + #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) + #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) + +diff -Nur linux-4.9.28.orig/include/linux/suspend.h linux-4.9.28/include/linux/suspend.h +--- linux-4.9.28.orig/include/linux/suspend.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/suspend.h 2017-05-19 03:37:25.174176493 +0200 +@@ -193,6 +193,12 @@ + void (*end)(void); + }; + ++#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) ++extern bool pm_in_action; ++#else ++# define pm_in_action false ++#endif ++ + #ifdef CONFIG_SUSPEND + /** + * suspend_set_ops - set platform dependent suspend operations +diff -Nur linux-4.9.28.orig/include/linux/swait.h linux-4.9.28/include/linux/swait.h +--- linux-4.9.28.orig/include/linux/swait.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/swait.h 2017-05-19 03:37:25.174176493 +0200 +@@ -87,6 +87,7 @@ + extern void swake_up(struct swait_queue_head *q); + extern void swake_up_all(struct swait_queue_head *q); + extern void swake_up_locked(struct swait_queue_head *q); ++extern void swake_up_all_locked(struct swait_queue_head *q); + + extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); + extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); +diff -Nur linux-4.9.28.orig/include/linux/swap.h linux-4.9.28/include/linux/swap.h +--- linux-4.9.28.orig/include/linux/swap.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/swap.h 2017-05-19 03:37:25.174176493 +0200 +@@ -11,6 +11,7 @@ + #include <linux/fs.h> + #include 
<linux/atomic.h> + #include <linux/page-flags.h> ++#include <linux/locallock.h> + #include <asm/page.h> + + struct notifier_block; +@@ -247,7 +248,8 @@ + void *workingset_eviction(struct address_space *mapping, struct page *page); + bool workingset_refault(void *shadow); + void workingset_activation(struct page *page); +-extern struct list_lru workingset_shadow_nodes; ++extern struct list_lru __workingset_shadow_nodes; ++DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock); + + static inline unsigned int workingset_node_pages(struct radix_tree_node *node) + { +@@ -292,6 +294,7 @@ + + + /* linux/mm/swap.c */ ++DECLARE_LOCAL_IRQ_LOCK(swapvec_lock); + extern void lru_cache_add(struct page *); + extern void lru_cache_add_anon(struct page *page); + extern void lru_cache_add_file(struct page *page); +diff -Nur linux-4.9.28.orig/include/linux/swork.h linux-4.9.28/include/linux/swork.h +--- linux-4.9.28.orig/include/linux/swork.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/linux/swork.h 2017-05-19 03:37:25.174176493 +0200 +@@ -0,0 +1,24 @@ ++#ifndef _LINUX_SWORK_H ++#define _LINUX_SWORK_H ++ ++#include <linux/list.h> ++ ++struct swork_event { ++ struct list_head item; ++ unsigned long flags; ++ void (*func)(struct swork_event *); ++}; ++ ++static inline void INIT_SWORK(struct swork_event *event, ++ void (*func)(struct swork_event *)) ++{ ++ event->flags = 0; ++ event->func = func; ++} ++ ++bool swork_queue(struct swork_event *sev); ++ ++int swork_get(void); ++void swork_put(void); ++ ++#endif /* _LINUX_SWORK_H */ +diff -Nur linux-4.9.28.orig/include/linux/thread_info.h linux-4.9.28/include/linux/thread_info.h +--- linux-4.9.28.orig/include/linux/thread_info.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/thread_info.h 2017-05-19 03:37:25.174176493 +0200 +@@ -107,7 +107,17 @@ + #define test_thread_flag(flag) \ + test_ti_thread_flag(current_thread_info(), flag) + +-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) ++#ifdef CONFIG_PREEMPT_LAZY ++#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ ++ test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) ++#define tif_need_resched_lazy() (test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++ ++#else ++#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) ++#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) ++#define tif_need_resched_lazy() 0 ++#endif + + #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES + static inline int arch_within_stack_frames(const void * const stack, +diff -Nur linux-4.9.28.orig/include/linux/timer.h linux-4.9.28/include/linux/timer.h +--- linux-4.9.28.orig/include/linux/timer.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/timer.h 2017-05-19 03:37:25.174176493 +0200 +@@ -241,7 +241,7 @@ + + extern int try_to_del_timer_sync(struct timer_list *timer); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + extern int del_timer_sync(struct timer_list *timer); + #else + # define del_timer_sync(t) del_timer(t) +diff -Nur linux-4.9.28.orig/include/linux/trace_events.h linux-4.9.28/include/linux/trace_events.h +--- linux-4.9.28.orig/include/linux/trace_events.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/trace_events.h 2017-05-19 03:37:25.174176493 +0200 +@@ -56,6 +56,9 @@ + unsigned char flags; + unsigned char preempt_count; + int pid; ++ unsigned short migrate_disable; ++ unsigned short padding; ++ unsigned char preempt_lazy_count; + };
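The swork ("simple work") interface added above is terse, so a usage sketch may help. This fragment is illustrative only and not part of the patch; the example_* names are hypothetical, error handling is minimal, and the handler runs in preemptible task context on the swork helper thread:

	#include <linux/printk.h>
	#include <linux/swork.h>

	static struct swork_event example_event;

	/* Called later from the swork thread, in sleepable task context. */
	static void example_func(struct swork_event *sev)
	{
		pr_info("swork: deferred handler ran\n");
	}

	static int example_setup(void)
	{
		int err;

		err = swork_get();	/* create or reference the helper thread */
		if (err)
			return err;

		INIT_SWORK(&example_event, example_func);
		swork_queue(&example_event);	/* example_func() runs later */
		return 0;
	}

A matching swork_put() drops the helper-thread reference once no further events will be queued.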
+ + #define TRACE_EVENT_TYPE_MAX \ +diff -Nur linux-4.9.28.orig/include/linux/uaccess.h linux-4.9.28/include/linux/uaccess.h +--- linux-4.9.28.orig/include/linux/uaccess.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/uaccess.h 2017-05-19 03:37:25.178176648 +0200 +@@ -24,6 +24,7 @@ + */ + static inline void pagefault_disable(void) + { ++ migrate_disable(); + pagefault_disabled_inc(); + /* + * make sure to have issued the store before a pagefault +@@ -40,6 +41,7 @@ + */ + barrier(); + pagefault_disabled_dec(); ++ migrate_enable(); + } + + /* +diff -Nur linux-4.9.28.orig/include/linux/uprobes.h linux-4.9.28/include/linux/uprobes.h +--- linux-4.9.28.orig/include/linux/uprobes.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/uprobes.h 2017-05-19 03:37:25.178176648 +0200 +@@ -27,6 +27,7 @@ + #include <linux/errno.h> + #include <linux/rbtree.h> + #include <linux/types.h> ++#include <linux/wait.h> + + struct vm_area_struct; + struct mm_struct; +diff -Nur linux-4.9.28.orig/include/linux/vmstat.h linux-4.9.28/include/linux/vmstat.h +--- linux-4.9.28.orig/include/linux/vmstat.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/vmstat.h 2017-05-19 03:37:25.178176648 +0200 +@@ -33,7 +33,9 @@ + */ + static inline void __count_vm_event(enum vm_event_item item) + { ++ preempt_disable_rt(); + raw_cpu_inc(vm_event_states.event[item]); ++ preempt_enable_rt(); + } + + static inline void count_vm_event(enum vm_event_item item) +@@ -43,7 +45,9 @@ + + static inline void __count_vm_events(enum vm_event_item item, long delta) + { ++ preempt_disable_rt(); + raw_cpu_add(vm_event_states.event[item], delta); ++ preempt_enable_rt(); + } + + static inline void count_vm_events(enum vm_event_item item, long delta) +diff -Nur linux-4.9.28.orig/include/linux/wait.h linux-4.9.28/include/linux/wait.h +--- linux-4.9.28.orig/include/linux/wait.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/linux/wait.h 2017-05-19 03:37:25.178176648 +0200 +@@ -8,6 +8,7 @@ + #include <linux/spinlock.h> + #include <asm/current.h> + #include <uapi/linux/wait.h> ++#include <linux/atomic.h> + + typedef struct __wait_queue wait_queue_t; + typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); +diff -Nur linux-4.9.28.orig/include/net/dst.h linux-4.9.28/include/net/dst.h +--- linux-4.9.28.orig/include/net/dst.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/net/dst.h 2017-05-19 03:37:25.178176648 +0200 +@@ -446,7 +446,7 @@ + static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, + struct sk_buff *skb) + { +- const struct hh_cache *hh; ++ struct hh_cache *hh; + + if (dst->pending_confirm) { + unsigned long now = jiffies; +diff -Nur linux-4.9.28.orig/include/net/gen_stats.h linux-4.9.28/include/net/gen_stats.h +--- linux-4.9.28.orig/include/net/gen_stats.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/net/gen_stats.h 2017-05-19 03:37:25.178176648 +0200 +@@ -5,6 +5,7 @@ + #include <linux/socket.h> + #include <linux/rtnetlink.h> + #include <linux/pkt_sched.h> ++#include <net/net_seq_lock.h> + + struct gnet_stats_basic_cpu { + struct gnet_stats_basic_packed bstats; +@@ -33,11 +34,11 @@ + spinlock_t *lock, struct gnet_dump *d, + int padattr); + +-int gnet_stats_copy_basic(const seqcount_t *running, ++int gnet_stats_copy_basic(net_seqlock_t *running, + struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b); +-void __gnet_stats_copy_basic(const 
seqcount_t *running, ++void __gnet_stats_copy_basic(net_seqlock_t *running, + struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b); +@@ -55,14 +56,14 @@ + struct gnet_stats_basic_cpu __percpu *cpu_bstats, + struct gnet_stats_rate_est64 *rate_est, + spinlock_t *stats_lock, +- seqcount_t *running, struct nlattr *opt); ++ net_seqlock_t *running, struct nlattr *opt); + void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_rate_est64 *rate_est); + int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu_bstats, + struct gnet_stats_rate_est64 *rate_est, + spinlock_t *stats_lock, +- seqcount_t *running, struct nlattr *opt); ++ net_seqlock_t *running, struct nlattr *opt); + bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, + const struct gnet_stats_rate_est64 *rate_est); + #endif +diff -Nur linux-4.9.28.orig/include/net/neighbour.h linux-4.9.28/include/net/neighbour.h +--- linux-4.9.28.orig/include/net/neighbour.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/net/neighbour.h 2017-05-19 03:37:25.178176648 +0200 +@@ -446,7 +446,7 @@ + } + #endif + +-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) ++static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) + { + unsigned int seq; + int hh_len; +@@ -501,7 +501,7 @@ + + #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) + +-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, ++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, + const struct net_device *dev) + { + unsigned int seq; +diff -Nur linux-4.9.28.orig/include/net/netns/ipv4.h linux-4.9.28/include/net/netns/ipv4.h +--- linux-4.9.28.orig/include/net/netns/ipv4.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/net/netns/ipv4.h 2017-05-19 03:37:25.178176648 +0200 +@@ -69,6 +69,7 @@ + + int sysctl_icmp_echo_ignore_all; + int sysctl_icmp_echo_ignore_broadcasts; ++ int sysctl_icmp_echo_sysrq; + int sysctl_icmp_ignore_bogus_error_responses; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; +diff -Nur linux-4.9.28.orig/include/net/net_seq_lock.h linux-4.9.28/include/net/net_seq_lock.h +--- linux-4.9.28.orig/include/net/net_seq_lock.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/net/net_seq_lock.h 2017-05-19 03:37:25.178176648 +0200 +@@ -0,0 +1,15 @@ ++#ifndef __NET_NET_SEQ_LOCK_H__ ++#define __NET_NET_SEQ_LOCK_H__ ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define net_seqlock_t seqlock_t ++# define net_seq_begin(__r) read_seqbegin(__r) ++# define net_seq_retry(__r, __s) read_seqretry(__r, __s) ++ ++#else ++# define net_seqlock_t seqcount_t ++# define net_seq_begin(__r) read_seqcount_begin(__r) ++# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s) ++#endif ++ ++#endif +diff -Nur linux-4.9.28.orig/include/net/sch_generic.h linux-4.9.28/include/net/sch_generic.h +--- linux-4.9.28.orig/include/net/sch_generic.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/net/sch_generic.h 2017-05-19 03:37:25.178176648 +0200 +@@ -10,6 +10,7 @@ + #include <linux/dynamic_queue_limits.h> + #include <net/gen_stats.h> + #include <net/rtnetlink.h> ++#include <net/net_seq_lock.h> + + struct Qdisc_ops; + struct qdisc_walker; +@@ -86,7 +87,7 @@ + struct sk_buff *gso_skb ____cacheline_aligned_in_smp; + struct qdisc_skb_head q; + struct gnet_stats_basic_packed bstats; +- seqcount_t running; 
++ net_seqlock_t running; + struct gnet_stats_queue qstats; + unsigned long state; + struct Qdisc *next_sched; +@@ -98,13 +99,22 @@ + spinlock_t busylock ____cacheline_aligned_in_smp; + }; + +-static inline bool qdisc_is_running(const struct Qdisc *qdisc) ++static inline bool qdisc_is_running(struct Qdisc *qdisc) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ return spin_is_locked(&qdisc->running.lock) ? true : false; ++#else + return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; ++#endif + } + + static inline bool qdisc_run_begin(struct Qdisc *qdisc) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ if (try_write_seqlock(&qdisc->running)) ++ return true; ++ return false; ++#else + if (qdisc_is_running(qdisc)) + return false; + /* Variant of write_seqcount_begin() telling lockdep a trylock +@@ -113,11 +123,16 @@ + raw_write_seqcount_begin(&qdisc->running); + seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_); + return true; ++#endif + } + + static inline void qdisc_run_end(struct Qdisc *qdisc) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ write_sequnlock(&qdisc->running); ++#else + write_seqcount_end(&qdisc->running); ++#endif + } + + static inline bool qdisc_may_bulk(const struct Qdisc *qdisc) +@@ -308,7 +323,7 @@ + return qdisc_lock(root); + } + +-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) ++static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) + { + struct Qdisc *root = qdisc_root_sleeping(qdisc); + +diff -Nur linux-4.9.28.orig/include/trace/events/hist.h linux-4.9.28/include/trace/events/hist.h +--- linux-4.9.28.orig/include/trace/events/hist.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/trace/events/hist.h 2017-05-19 03:37:25.178176648 +0200 +@@ -0,0 +1,73 @@ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM hist ++ ++#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_HIST_H ++ ++#include "latency_hist.h" ++#include <linux/tracepoint.h> ++ ++#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) ++#define trace_preemptirqsoff_hist(a, b) ++#define trace_preemptirqsoff_hist_rcuidle(a, b) ++#else ++TRACE_EVENT(preemptirqsoff_hist, ++ ++ TP_PROTO(int reason, int starthist), ++ ++ TP_ARGS(reason, starthist), ++ ++ TP_STRUCT__entry( ++ __field(int, reason) ++ __field(int, starthist) ++ ), ++ ++ TP_fast_assign( ++ __entry->reason = reason; ++ __entry->starthist = starthist; ++ ), ++ ++ TP_printk("reason=%s starthist=%s", getaction(__entry->reason), ++ __entry->starthist ? "start" : "stop") ++); ++#endif ++ ++#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST ++#define trace_hrtimer_interrupt(a, b, c, d) ++#else ++TRACE_EVENT(hrtimer_interrupt, ++ ++ TP_PROTO(int cpu, long long offset, struct task_struct *curr, ++ struct task_struct *task), ++ ++ TP_ARGS(cpu, offset, curr, task), ++ ++ TP_STRUCT__entry( ++ __field(int, cpu) ++ __field(long long, offset) ++ __array(char, ccomm, TASK_COMM_LEN) ++ __field(int, cprio) ++ __array(char, tcomm, TASK_COMM_LEN) ++ __field(int, tprio) ++ ), ++ ++ TP_fast_assign( ++ __entry->cpu = cpu; ++ __entry->offset = offset; ++ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); ++ __entry->cprio = curr->prio; ++ memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>", ++ task != NULL ? TASK_COMM_LEN : 7); ++ __entry->tprio = task != NULL ? 
task->prio : -1; ++ ), ++ ++ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", ++ __entry->cpu, __entry->offset, __entry->ccomm, ++ __entry->cprio, __entry->tcomm, __entry->tprio) ++); ++#endif ++ ++#endif /* _TRACE_HIST_H */ ++ ++/* This part must be outside protection */ ++#include <trace/define_trace.h> +diff -Nur linux-4.9.28.orig/include/trace/events/latency_hist.h linux-4.9.28/include/trace/events/latency_hist.h +--- linux-4.9.28.orig/include/trace/events/latency_hist.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/include/trace/events/latency_hist.h 2017-05-19 03:37:25.178176648 +0200 +@@ -0,0 +1,29 @@ ++#ifndef _LATENCY_HIST_H ++#define _LATENCY_HIST_H ++ ++enum hist_action { ++ IRQS_ON, ++ PREEMPT_ON, ++ TRACE_STOP, ++ IRQS_OFF, ++ PREEMPT_OFF, ++ TRACE_START, ++}; ++ ++static char *actions[] = { ++ "IRQS_ON", ++ "PREEMPT_ON", ++ "TRACE_STOP", ++ "IRQS_OFF", ++ "PREEMPT_OFF", ++ "TRACE_START", ++}; ++ ++static inline char *getaction(int action) ++{ ++ if (action >= 0 && action < sizeof(actions)/sizeof(actions[0])) ++ return actions[action]; ++ return "unknown"; ++} ++ ++#endif /* _LATENCY_HIST_H */ +diff -Nur linux-4.9.28.orig/include/trace/events/sched.h linux-4.9.28/include/trace/events/sched.h +--- linux-4.9.28.orig/include/trace/events/sched.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/include/trace/events/sched.h 2017-05-19 03:37:25.178176648 +0200 +@@ -70,7 +70,7 @@ + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; +- __entry->prio = p->prio; ++ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ + __entry->success = 1; /* rudiment, kill when possible */ + __entry->target_cpu = task_cpu(p); + ), +@@ -147,6 +147,7 @@ + memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); + __entry->next_pid = next->pid; + __entry->next_prio = next->prio; ++ /* XXX SCHED_DEADLINE */ + ), + + TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d", +@@ -181,7 +182,7 @@ + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; +- __entry->prio = p->prio; ++ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ + __entry->orig_cpu = task_cpu(p); + __entry->dest_cpu = dest_cpu; + ), +@@ -206,7 +207,7 @@ + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; +- __entry->prio = p->prio; ++ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ + ), + + TP_printk("comm=%s pid=%d prio=%d", +@@ -253,7 +254,7 @@ + TP_fast_assign( + memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + __entry->pid = pid_nr(pid); +- __entry->prio = current->prio; ++ __entry->prio = current->prio; /* XXX SCHED_DEADLINE */ + ), + + TP_printk("comm=%s pid=%d prio=%d", +@@ -413,9 +414,9 @@ + */ + TRACE_EVENT(sched_pi_setprio, + +- TP_PROTO(struct task_struct *tsk, int newprio), ++ TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task), + +- TP_ARGS(tsk, newprio), ++ TP_ARGS(tsk, pi_task), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) +@@ -428,7 +429,8 @@ + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->oldprio = tsk->prio; +- __entry->newprio = newprio; ++ __entry->newprio = pi_task ? 
pi_task->prio : tsk->prio; ++ /* XXX SCHED_DEADLINE bits missing */ + ), + + TP_printk("comm=%s pid=%d oldprio=%d newprio=%d", +diff -Nur linux-4.9.28.orig/init/Kconfig linux-4.9.28/init/Kconfig +--- linux-4.9.28.orig/init/Kconfig 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/init/Kconfig 2017-05-19 03:37:25.178176648 +0200 +@@ -506,7 +506,7 @@ + + config RCU_EXPERT + bool "Make expert-level adjustments to RCU configuration" +- default n ++ default y if PREEMPT_RT_FULL + help + This option needs to be enabled if you wish to make + expert-level adjustments to RCU configuration. By default, +@@ -623,7 +623,7 @@ + + config RCU_FAST_NO_HZ + bool "Accelerate last non-dyntick-idle CPU's grace periods" +- depends on NO_HZ_COMMON && SMP && RCU_EXPERT ++ depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL + default n + help + This option permits CPUs to enter dynticks-idle state even if +@@ -650,7 +650,7 @@ + config RCU_BOOST + bool "Enable RCU priority boosting" + depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT +- default n ++ default y if PREEMPT_RT_FULL + help + This option boosts the priority of preempted RCU readers that + block the current preemptible RCU grace period for too long. +@@ -781,19 +781,6 @@ + + endchoice + +-config RCU_EXPEDITE_BOOT +- bool +- default n +- help +- This option enables expedited grace periods at boot time, +- as if rcu_expedite_gp() had been invoked early in boot. +- The corresponding rcu_unexpedite_gp() is invoked from +- rcu_end_inkernel_boot(), which is intended to be invoked +- at the end of the kernel-only boot sequence, just before +- init is exec'ed. +- +- Accept the default if unsure. +- + endmenu # "RCU Subsystem" + + config BUILD_BIN2C +@@ -1064,6 +1051,7 @@ + config RT_GROUP_SCHED + bool "Group scheduling for SCHED_RR/FIFO" + depends on CGROUP_SCHED ++ depends on !PREEMPT_RT_FULL + default n + help + This feature lets you explicitly allocate real CPU bandwidth +@@ -1772,6 +1760,7 @@ + + config SLAB + bool "SLAB" ++ depends on !PREEMPT_RT_FULL + select HAVE_HARDENED_USERCOPY_ALLOCATOR + help + The regular slab allocator that is established and known to work +@@ -1792,6 +1781,7 @@ + config SLOB + depends on EXPERT + bool "SLOB (Simple Allocator)" ++ depends on !PREEMPT_RT_FULL + help + SLOB replaces the stock allocator with a drastically simpler + allocator. 
SLOB is generally more space efficient but +@@ -1810,7 +1800,7 @@ + + config SLUB_CPU_PARTIAL + default y +- depends on SLUB && SMP ++ depends on SLUB && SMP && !PREEMPT_RT_FULL + bool "SLUB per cpu partial cache" + help + Per cpu partial caches accellerate objects allocation and freeing +diff -Nur linux-4.9.28.orig/init/main.c linux-4.9.28/init/main.c +--- linux-4.9.28.orig/init/main.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/init/main.c 2017-05-19 03:37:25.178176648 +0200 +@@ -507,6 +507,7 @@ + setup_command_line(command_line); + setup_nr_cpu_ids(); + setup_per_cpu_areas(); ++ softirq_early_init(); + boot_cpu_state_init(); + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + +diff -Nur linux-4.9.28.orig/init/Makefile linux-4.9.28/init/Makefile +--- linux-4.9.28.orig/init/Makefile 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/init/Makefile 2017-05-19 03:37:25.178176648 +0200 +@@ -35,4 +35,4 @@ + include/generated/compile.h: FORCE + @$($(quiet)chk_compile.h) + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ +- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" ++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" +diff -Nur linux-4.9.28.orig/ipc/sem.c linux-4.9.28/ipc/sem.c +--- linux-4.9.28.orig/ipc/sem.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/ipc/sem.c 2017-05-19 03:37:25.178176648 +0200 +@@ -712,6 +712,13 @@ + static void wake_up_sem_queue_prepare(struct list_head *pt, + struct sem_queue *q, int error) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *p = q->sleeper; ++ get_task_struct(p); ++ q->status = error; ++ wake_up_process(p); ++ put_task_struct(p); ++#else + if (list_empty(pt)) { + /* + * Hold preempt off so that we don't get preempted and have the +@@ -723,6 +730,7 @@ + q->pid = error; + + list_add_tail(&q->list, pt); ++#endif + } + + /** +@@ -736,6 +744,7 @@ + */ + static void wake_up_sem_queue_do(struct list_head *pt) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + struct sem_queue *q, *t; + int did_something; + +@@ -748,6 +757,7 @@ + } + if (did_something) + preempt_enable(); ++#endif + } + + static void unlink_queue(struct sem_array *sma, struct sem_queue *q) +diff -Nur linux-4.9.28.orig/kernel/cgroup.c linux-4.9.28/kernel/cgroup.c +--- linux-4.9.28.orig/kernel/cgroup.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/cgroup.c 2017-05-19 03:37:25.178176648 +0200 +@@ -5041,10 +5041,10 @@ + queue_work(cgroup_destroy_wq, &css->destroy_work); + } + +-static void css_release_work_fn(struct work_struct *work) ++static void css_release_work_fn(struct swork_event *sev) + { + struct cgroup_subsys_state *css = +- container_of(work, struct cgroup_subsys_state, destroy_work); ++ container_of(sev, struct cgroup_subsys_state, destroy_swork); + struct cgroup_subsys *ss = css->ss; + struct cgroup *cgrp = css->cgroup; + +@@ -5087,8 +5087,8 @@ + struct cgroup_subsys_state *css = + container_of(ref, struct cgroup_subsys_state, refcnt); + +- INIT_WORK(&css->destroy_work, css_release_work_fn); +- queue_work(cgroup_destroy_wq, &css->destroy_work); ++ INIT_SWORK(&css->destroy_swork, css_release_work_fn); ++ swork_queue(&css->destroy_swork); + } + + static void init_and_link_css(struct cgroup_subsys_state *css, +@@ -5740,6 +5740,7 @@ + */ + cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); + BUG_ON(!cgroup_destroy_wq); ++ BUG_ON(swork_get()); + + /* + * Used to destroy pidlists and separate to serve as flush domain. 
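The kernel/cpu.c rework below leans on the accessor form of tsk_cpus_allowed() introduced in the sched.h hunk earlier: after migrate_disable(), a task advertises exactly one legal CPU. A minimal sketch of that semantic, illustrative only (affinity_demo is a hypothetical name):

	#include <linux/preempt.h>
	#include <linux/printk.h>
	#include <linux/sched.h>

	static void affinity_demo(void)
	{
		migrate_disable();
		/* current is migrate-disabled, so exactly one CPU is reported */
		pr_info("%s may run on %*pbl\n", current->comm,
			cpumask_pr_args(tsk_cpus_allowed(current)));
		migrate_enable();
	}

Note how _cpu_down() below reads tsk_cpus_allowed(current) before calling migrate_disable(), so the full affinity mask is saved in cpumask_org and can be restored once the unplug finishes.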
+diff -Nur linux-4.9.28.orig/kernel/cpu.c linux-4.9.28/kernel/cpu.c +--- linux-4.9.28.orig/kernel/cpu.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/cpu.c 2017-05-19 03:37:25.182176801 +0200 +@@ -239,6 +239,289 @@ + #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) + #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) + ++/** ++ * hotplug_pcp - per cpu hotplug descriptor ++ * @unplug: set when pin_current_cpu() needs to sync tasks ++ * @sync_tsk: the task that waits for tasks to finish pinned sections ++ * @refcount: counter of tasks in pinned sections ++ * @grab_lock: set when the tasks entering pinned sections should wait ++ * @synced: notifier for @sync_tsk to tell cpu_down it's finished ++ * @mutex: the mutex to make tasks wait (used when @grab_lock is true) ++ * @mutex_init: zero if the mutex hasn't been initialized yet. ++ * ++ * Although @unplug and @sync_tsk may point to the same task, the @unplug ++ * is used as a flag and still exists after @sync_tsk has exited and ++ * @sync_tsk set to NULL. ++ */ ++struct hotplug_pcp { ++ struct task_struct *unplug; ++ struct task_struct *sync_tsk; ++ int refcount; ++ int grab_lock; ++ struct completion synced; ++ struct completion unplug_wait; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * Note, on PREEMPT_RT, the hotplug lock must save the state of ++ * the task, otherwise the mutex will cause the task to fail ++ * to sleep when required. (Because it's called from migrate_disable()) ++ * ++ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's ++ * state. ++ */ ++ spinlock_t lock; ++#else ++ struct mutex mutex; ++#endif ++ int mutex_init; ++}; ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock) ++# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock) ++#else ++# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) ++# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) ++#endif ++ ++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); ++ ++/** ++ * pin_current_cpu - Prevent the current cpu from being unplugged ++ * ++ * Lightweight version of get_online_cpus() to prevent cpu from being ++ * unplugged when code runs in a migration disabled region. ++ * ++ * Must be called with preemption disabled (preempt_count = 1)! ++ */ ++void pin_current_cpu(void) ++{ ++ struct hotplug_pcp *hp; ++ int force = 0; ++ ++retry: ++ hp = this_cpu_ptr(&hotplug_pcp); ++ ++ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 || ++ hp->unplug == current) { ++ hp->refcount++; ++ return; ++ } ++ if (hp->grab_lock) { ++ preempt_enable(); ++ hotplug_lock(hp); ++ hotplug_unlock(hp); ++ } else { ++ preempt_enable(); ++ /* ++ * Try to push this task off of this CPU. ++ */ ++ if (!migrate_me()) { ++ preempt_disable(); ++ hp = this_cpu_ptr(&hotplug_pcp); ++ if (!hp->grab_lock) { ++ /* ++ * Just let it continue it's already pinned ++ * or about to sleep. ++ */ ++ force = 1; ++ goto retry; ++ } ++ preempt_enable(); ++ } ++ } ++ preempt_disable(); ++ goto retry; ++} ++ ++/** ++ * unpin_current_cpu - Allow unplug of current cpu ++ * ++ * Must be called with preemption or interrupts disabled! ++ */ ++void unpin_current_cpu(void) ++{ ++ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp); ++ ++ WARN_ON(hp->refcount <= 0); ++ ++ /* This is safe. 
sync_unplug_thread is pinned to this cpu */ ++ if (!--hp->refcount && hp->unplug && hp->unplug != current) ++ wake_up_process(hp->unplug); ++} ++ ++static void wait_for_pinned_cpus(struct hotplug_pcp *hp) ++{ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (hp->refcount) { ++ schedule_preempt_disabled(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++} ++ ++static int sync_unplug_thread(void *data) ++{ ++ struct hotplug_pcp *hp = data; ++ ++ wait_for_completion(&hp->unplug_wait); ++ preempt_disable(); ++ hp->unplug = current; ++ wait_for_pinned_cpus(hp); ++ ++ /* ++ * This thread will synchronize the cpu_down() with threads ++ * that have pinned the CPU. When the pinned CPU count reaches ++ * zero, we inform the cpu_down code to continue to the next step. ++ */ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ preempt_enable(); ++ complete(&hp->synced); ++ ++ /* ++ * If all succeeds, the next step will need tasks to wait till ++ * the CPU is offline before continuing. To do this, the grab_lock ++ * is set and tasks going into pin_current_cpu() will block on the ++ * mutex. But we still need to wait for those that are already in ++ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop() ++ * will kick this thread out. ++ */ ++ while (!hp->grab_lock && !kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ ++ /* Make sure grab_lock is seen before we see a stale completion */ ++ smp_mb(); ++ ++ /* ++ * Now just before cpu_down() enters stop machine, we need to make ++ * sure all tasks that are in pinned CPU sections are out, and new ++ * tasks will now grab the lock, keeping them from entering pinned ++ * CPU sections. ++ */ ++ if (!kthread_should_stop()) { ++ preempt_disable(); ++ wait_for_pinned_cpus(hp); ++ preempt_enable(); ++ complete(&hp->synced); ++ } ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ set_current_state(TASK_RUNNING); ++ ++ /* ++ * Force this thread off this CPU as it's going down and ++ * we don't want any more work on this CPU. ++ */ ++ current->flags &= ~PF_NO_SETAFFINITY; ++ set_cpus_allowed_ptr(current, cpu_present_mask); ++ migrate_me(); ++ return 0; ++} ++ ++static void __cpu_unplug_sync(struct hotplug_pcp *hp) ++{ ++ wake_up_process(hp->sync_tsk); ++ wait_for_completion(&hp->synced); ++} ++ ++static void __cpu_unplug_wait(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ complete(&hp->unplug_wait); ++ wait_for_completion(&hp->synced); ++} ++ ++/* ++ * Start the sync_unplug_thread on the target cpu and wait for it to ++ * complete. ++ */ ++static int cpu_unplug_begin(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ int err; ++ ++ /* Protected by cpu_hotplug.lock */ ++ if (!hp->mutex_init) { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ spin_lock_init(&hp->lock); ++#else ++ mutex_init(&hp->mutex); ++#endif ++ hp->mutex_init = 1; ++ } ++ ++ /* Inform the scheduler to migrate tasks off this CPU */ ++ tell_sched_cpu_down_begin(cpu); ++ ++ init_completion(&hp->synced); ++ init_completion(&hp->unplug_wait); ++ ++ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); ++ if (IS_ERR(hp->sync_tsk)) { ++ err = PTR_ERR(hp->sync_tsk); ++ hp->sync_tsk = NULL; ++ return err; ++ } ++ kthread_bind(hp->sync_tsk, cpu); ++ ++ /* ++ * Wait for tasks to get out of the pinned sections, ++ * it's still OK if new tasks enter. 
Some CPU notifiers will ++ * wait for tasks that are going to enter these sections and ++ * we must not have them block. ++ */ ++ wake_up_process(hp->sync_tsk); ++ return 0; ++} ++ ++static void cpu_unplug_sync(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ init_completion(&hp->synced); ++ /* The completion needs to be initialized before setting grab_lock */ ++ smp_wmb(); ++ ++ /* Grab the mutex before setting grab_lock */ ++ hotplug_lock(hp); ++ hp->grab_lock = 1; ++ ++ /* ++ * The CPU notifiers have been completed. ++ * Wait for tasks to get out of pinned CPU sections and have new ++ * tasks block until the CPU is completely down. ++ */ ++ __cpu_unplug_sync(hp); ++ ++ /* All done with the sync thread */ ++ kthread_stop(hp->sync_tsk); ++ hp->sync_tsk = NULL; ++} ++ ++static void cpu_unplug_done(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ hp->unplug = NULL; ++ /* Let all tasks know cpu unplug is finished before cleaning up */ ++ smp_wmb(); ++ ++ if (hp->sync_tsk) ++ kthread_stop(hp->sync_tsk); ++ ++ if (hp->grab_lock) { ++ hotplug_unlock(hp); ++ /* protected by cpu_hotplug.lock */ ++ hp->grab_lock = 0; ++ } ++ tell_sched_cpu_down_done(cpu); ++} + + void get_online_cpus(void) + { +@@ -789,10 +1072,14 @@ + struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); + int err; + ++ __cpu_unplug_wait(cpu); + /* Park the smpboot threads */ + kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread); + smpboot_park_threads(cpu); + ++ /* Notifiers are done. Don't let any more tasks pin this CPU. */ ++ cpu_unplug_sync(cpu); ++ + /* + * Prevent irq alloc/free while the dying cpu reorganizes the + * interrupt affinities. +@@ -877,6 +1164,9 @@ + struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); + int prev_state, ret = 0; + bool hasdied = false; ++ int mycpu; ++ cpumask_var_t cpumask; ++ cpumask_var_t cpumask_org; + + if (num_online_cpus() == 1) + return -EBUSY; +@@ -884,7 +1174,34 @@ + if (!cpu_present(cpu)) + return -EINVAL; + ++ /* Move the downtaker off the unplug cpu */ ++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) ++ return -ENOMEM; ++ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) { ++ free_cpumask_var(cpumask); ++ return -ENOMEM; ++ } ++ ++ cpumask_copy(cpumask_org, tsk_cpus_allowed(current)); ++ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); ++ set_cpus_allowed_ptr(current, cpumask); ++ free_cpumask_var(cpumask); ++ migrate_disable(); ++ mycpu = smp_processor_id(); ++ if (mycpu == cpu) { ++ printk(KERN_ERR "Yuck! 
Still on unplug CPU\n!"); ++ migrate_enable(); ++ ret = -EBUSY; ++ goto restore_cpus; ++ } ++ ++ migrate_enable(); + cpu_hotplug_begin(); ++ ret = cpu_unplug_begin(cpu); ++ if (ret) { ++ printk("cpu_unplug_begin(%d) failed\n", cpu); ++ goto out_cancel; ++ } + + cpuhp_tasks_frozen = tasks_frozen; + +@@ -923,10 +1240,15 @@ + + hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE; + out: ++ cpu_unplug_done(cpu); ++out_cancel: + cpu_hotplug_done(); + /* This post dead nonsense must die */ + if (!ret && hasdied) + cpu_notify_nofail(CPU_POST_DEAD, cpu); ++restore_cpus: ++ set_cpus_allowed_ptr(current, cpumask_org); ++ free_cpumask_var(cpumask_org); + return ret; + } + +@@ -1240,6 +1562,8 @@ + + #endif /* CONFIG_PM_SLEEP_SMP */ + ++int __boot_cpu_id; ++ + #endif /* CONFIG_SMP */ + + /* Boot processor state steps */ +@@ -1924,6 +2248,10 @@ + set_cpu_active(cpu, true); + set_cpu_present(cpu, true); + set_cpu_possible(cpu, true); ++ ++#ifdef CONFIG_SMP ++ __boot_cpu_id = cpu; ++#endif + } + + /* +diff -Nur linux-4.9.28.orig/kernel/cpuset.c linux-4.9.28/kernel/cpuset.c +--- linux-4.9.28.orig/kernel/cpuset.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/cpuset.c 2017-05-19 03:37:25.182176801 +0200 +@@ -284,7 +284,7 @@ + */ + + static DEFINE_MUTEX(cpuset_mutex); +-static DEFINE_SPINLOCK(callback_lock); ++static DEFINE_RAW_SPINLOCK(callback_lock); + + static struct workqueue_struct *cpuset_migrate_mm_wq; + +@@ -907,9 +907,9 @@ + continue; + rcu_read_unlock(); + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cpumask_copy(cp->effective_cpus, new_cpus); +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && + !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); +@@ -974,9 +974,9 @@ + if (retval < 0) + return retval; + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + /* use trialcs->cpus_allowed as a temp variable */ + update_cpumasks_hier(cs, trialcs->cpus_allowed); +@@ -1176,9 +1176,9 @@ + continue; + rcu_read_unlock(); + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cp->effective_mems = *new_mems; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && + !nodes_equal(cp->mems_allowed, cp->effective_mems)); +@@ -1246,9 +1246,9 @@ + if (retval < 0) + goto done; + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cs->mems_allowed = trialcs->mems_allowed; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + /* use trialcs->mems_allowed as a temp variable */ + update_nodemasks_hier(cs, &trialcs->mems_allowed); +@@ -1339,9 +1339,9 @@ + spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) + || (is_spread_page(cs) != is_spread_page(trialcs))); + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cs->flags = trialcs->flags; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) + rebuild_sched_domains_locked(); +@@ -1756,7 +1756,7 @@ + cpuset_filetype_t type = seq_cft(sf)->private; + int ret = 0; + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + + switch (type) { + case FILE_CPULIST: +@@ -1775,7 +1775,7 @@ + ret = 
-EINVAL; + } + +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + return ret; + } + +@@ -1989,12 +1989,12 @@ + + cpuset_inc(); + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { + cpumask_copy(cs->effective_cpus, parent->effective_cpus); + cs->effective_mems = parent->effective_mems; + } +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) + goto out_unlock; +@@ -2021,12 +2021,12 @@ + } + rcu_read_unlock(); + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cs->mems_allowed = parent->mems_allowed; + cs->effective_mems = parent->mems_allowed; + cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); + cpumask_copy(cs->effective_cpus, parent->cpus_allowed); +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + out_unlock: + mutex_unlock(&cpuset_mutex); + return 0; +@@ -2065,7 +2065,7 @@ + static void cpuset_bind(struct cgroup_subsys_state *root_css) + { + mutex_lock(&cpuset_mutex); +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + + if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { + cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); +@@ -2076,7 +2076,7 @@ + top_cpuset.mems_allowed = top_cpuset.effective_mems; + } + +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + mutex_unlock(&cpuset_mutex); + } + +@@ -2177,12 +2177,12 @@ + { + bool is_empty; + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cpumask_copy(cs->cpus_allowed, new_cpus); + cpumask_copy(cs->effective_cpus, new_cpus); + cs->mems_allowed = *new_mems; + cs->effective_mems = *new_mems; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + /* + * Don't call update_tasks_cpumask() if the cpuset becomes empty, +@@ -2219,10 +2219,10 @@ + if (nodes_empty(*new_mems)) + *new_mems = parent_cs(cs)->effective_mems; + +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + cpumask_copy(cs->effective_cpus, new_cpus); + cs->effective_mems = *new_mems; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + + if (cpus_updated) + update_tasks_cpumask(cs); +@@ -2308,21 +2308,21 @@ + + /* synchronize cpus_allowed to cpu_active_mask */ + if (cpus_updated) { +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + if (!on_dfl) + cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); + cpumask_copy(top_cpuset.effective_cpus, &new_cpus); +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + /* we don't mess with cpumasks of tasks in top_cpuset */ + } + + /* synchronize mems_allowed to N_MEMORY */ + if (mems_updated) { +- spin_lock_irq(&callback_lock); ++ raw_spin_lock_irq(&callback_lock); + if (!on_dfl) + top_cpuset.mems_allowed = new_mems; + top_cpuset.effective_mems = new_mems; +- spin_unlock_irq(&callback_lock); ++ raw_spin_unlock_irq(&callback_lock); + update_tasks_nodemask(&top_cpuset); + } + +@@ -2420,11 +2420,11 @@ + { + unsigned long flags; + +- spin_lock_irqsave(&callback_lock, flags); ++ raw_spin_lock_irqsave(&callback_lock, flags); + rcu_read_lock(); + guarantee_online_cpus(task_cs(tsk), pmask); + rcu_read_unlock(); +- spin_unlock_irqrestore(&callback_lock, flags); ++ raw_spin_unlock_irqrestore(&callback_lock, flags); + } + + void cpuset_cpus_allowed_fallback(struct task_struct *tsk) +@@ -2472,11 +2472,11 @@ + nodemask_t mask; 
+ unsigned long flags; + +- spin_lock_irqsave(&callback_lock, flags); ++ raw_spin_lock_irqsave(&callback_lock, flags); + rcu_read_lock(); + guarantee_online_mems(task_cs(tsk), &mask); + rcu_read_unlock(); +- spin_unlock_irqrestore(&callback_lock, flags); ++ raw_spin_unlock_irqrestore(&callback_lock, flags); + + return mask; + } +@@ -2568,14 +2568,14 @@ + return true; + + /* Not hardwall and node outside mems_allowed: scan up cpusets */ +- spin_lock_irqsave(&callback_lock, flags); ++ raw_spin_lock_irqsave(&callback_lock, flags); + + rcu_read_lock(); + cs = nearest_hardwall_ancestor(task_cs(current)); + allowed = node_isset(node, cs->mems_allowed); + rcu_read_unlock(); + +- spin_unlock_irqrestore(&callback_lock, flags); ++ raw_spin_unlock_irqrestore(&callback_lock, flags); + return allowed; + } + +diff -Nur linux-4.9.28.orig/kernel/debug/kdb/kdb_io.c linux-4.9.28/kernel/debug/kdb/kdb_io.c +--- linux-4.9.28.orig/kernel/debug/kdb/kdb_io.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/debug/kdb/kdb_io.c 2017-05-19 03:37:25.182176801 +0200 +@@ -554,7 +554,6 @@ + int linecount; + int colcount; + int logging, saved_loglevel = 0; +- int saved_trap_printk; + int got_printf_lock = 0; + int retlen = 0; + int fnd, len; +@@ -565,8 +564,6 @@ + unsigned long uninitialized_var(flags); + + preempt_disable(); +- saved_trap_printk = kdb_trap_printk; +- kdb_trap_printk = 0; + + /* Serialize kdb_printf if multiple cpus try to write at once. + * But if any cpu goes recursive in kdb, just print the output, +@@ -855,7 +852,6 @@ + } else { + __release(kdb_printf_lock); + } +- kdb_trap_printk = saved_trap_printk; + preempt_enable(); + return retlen; + } +@@ -865,9 +861,11 @@ + va_list ap; + int r; + ++ kdb_trap_printk++; + va_start(ap, fmt); + r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap); + va_end(ap); ++ kdb_trap_printk--; + + return r; + } +diff -Nur linux-4.9.28.orig/kernel/events/core.c linux-4.9.28/kernel/events/core.c +--- linux-4.9.28.orig/kernel/events/core.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/events/core.c 2017-05-19 03:37:25.182176801 +0200 +@@ -1050,6 +1050,7 @@ + raw_spin_lock_init(&cpuctx->hrtimer_lock); + hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + timer->function = perf_mux_hrtimer_handler; ++ timer->irqsafe = 1; + } + + static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) +@@ -8363,6 +8364,7 @@ + + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hwc->hrtimer.function = perf_swevent_hrtimer; ++ hwc->hrtimer.irqsafe = 1; + + /* + * Since hrtimers have a fixed rate, we can do a static freq->period +diff -Nur linux-4.9.28.orig/kernel/exit.c linux-4.9.28/kernel/exit.c +--- linux-4.9.28.orig/kernel/exit.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/exit.c 2017-05-19 03:37:25.182176801 +0200 +@@ -143,7 +143,7 @@ + * Do this under ->siglock, we can race with another thread + * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
+ */ +- flush_sigqueue(&tsk->pending); ++ flush_task_sigqueue(tsk); + tsk->sighand = NULL; + spin_unlock(&sighand->siglock); + +diff -Nur linux-4.9.28.orig/kernel/fork.c linux-4.9.28/kernel/fork.c +--- linux-4.9.28.orig/kernel/fork.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/fork.c 2017-05-19 03:37:25.182176801 +0200 +@@ -76,6 +76,7 @@ + #include <linux/compiler.h> + #include <linux/sysctl.h> + #include <linux/kcov.h> ++#include <linux/kprobes.h> + + #include <asm/pgtable.h> + #include <asm/pgalloc.h> +@@ -376,13 +377,24 @@ + if (atomic_dec_and_test(&sig->sigcnt)) + free_signal_struct(sig); + } +- ++#ifdef CONFIG_PREEMPT_RT_BASE ++static ++#endif + void __put_task_struct(struct task_struct *tsk) + { + WARN_ON(!tsk->exit_state); + WARN_ON(atomic_read(&tsk->usage)); + WARN_ON(tsk == current); + ++ /* ++ * Remove function-return probe instances associated with this ++ * task and put them back on the free list. ++ */ ++ kprobe_flush_task(tsk); ++ ++ /* Task is done with its stack. */ ++ put_task_stack(tsk); ++ + cgroup_free(tsk); + task_numa_free(tsk); + security_task_free(tsk); +@@ -393,7 +405,18 @@ + if (!profile_handoff_task(tsk)) + free_task(tsk); + } ++#ifndef CONFIG_PREEMPT_RT_BASE + EXPORT_SYMBOL_GPL(__put_task_struct); ++#else ++void __put_task_struct_cb(struct rcu_head *rhp) ++{ ++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); ++ ++ __put_task_struct(tsk); ++ ++} ++EXPORT_SYMBOL_GPL(__put_task_struct_cb); ++#endif + + void __init __weak arch_task_cache_init(void) { } + +@@ -852,6 +875,19 @@ + } + EXPORT_SYMBOL_GPL(__mmdrop); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++/* ++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't ++ * want another facility to make this work. ++ */ ++void __mmdrop_delayed(struct rcu_head *rhp) ++{ ++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); ++ ++ __mmdrop(mm); ++} ++#endif ++ + static inline void __mmput(struct mm_struct *mm) + { + VM_BUG_ON(atomic_read(&mm->mm_users)); +@@ -1417,6 +1453,7 @@ + #ifdef CONFIG_RT_MUTEXES + p->pi_waiters = RB_ROOT; + p->pi_waiters_leftmost = NULL; ++ p->pi_top_task = NULL; + p->pi_blocked_on = NULL; + #endif + } +@@ -1426,6 +1463,9 @@ + */ + static void posix_cpu_timers_init(struct task_struct *tsk) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ tsk->posix_timer_list = NULL; ++#endif + tsk->cputime_expires.prof_exp = 0; + tsk->cputime_expires.virt_exp = 0; + tsk->cputime_expires.sched_exp = 0; +@@ -1552,6 +1592,7 @@ + spin_lock_init(&p->alloc_lock); + + init_sigpending(&p->pending); ++ p->sigqueue_cache = NULL; + + p->utime = p->stime = p->gtime = 0; + p->utimescaled = p->stimescaled = 0; +diff -Nur linux-4.9.28.orig/kernel/futex.c linux-4.9.28/kernel/futex.c +--- linux-4.9.28.orig/kernel/futex.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/futex.c 2017-05-19 03:37:25.182176801 +0200 +@@ -800,7 +800,7 @@ + return 0; + } + +-static struct futex_pi_state * alloc_pi_state(void) ++static struct futex_pi_state *alloc_pi_state(void) + { + struct futex_pi_state *pi_state = current->pi_state_cache; + +@@ -810,6 +810,11 @@ + return pi_state; + } + ++static void get_pi_state(struct futex_pi_state *pi_state) ++{ ++ WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount)); ++} ++ + /* + * Drops a reference to the pi_state object and frees or caches it + * when the last reference is gone. +@@ -854,7 +859,7 @@ + * Look up the task based on what TID userspace gave us. + * We dont trust it. 
+ */ +-static struct task_struct * futex_find_get_task(pid_t pid) ++static struct task_struct *futex_find_get_task(pid_t pid) + { + struct task_struct *p; + +@@ -904,7 +909,9 @@ + * task still owns the PI-state: + */ + if (head->next != next) { ++ raw_spin_unlock_irq(&curr->pi_lock); + spin_unlock(&hb->lock); ++ raw_spin_lock_irq(&curr->pi_lock); + continue; + } + +@@ -914,10 +921,12 @@ + pi_state->owner = NULL; + raw_spin_unlock_irq(&curr->pi_lock); + +- rt_mutex_unlock(&pi_state->pi_mutex); +- ++ get_pi_state(pi_state); + spin_unlock(&hb->lock); + ++ rt_mutex_futex_unlock(&pi_state->pi_mutex); ++ put_pi_state(pi_state); ++ + raw_spin_lock_irq(&curr->pi_lock); + } + raw_spin_unlock_irq(&curr->pi_lock); +@@ -971,6 +980,39 @@ + * + * [10] There is no transient state which leaves owner and user space + * TID out of sync. ++ * ++ * ++ * Serialization and lifetime rules: ++ * ++ * hb->lock: ++ * ++ * hb -> futex_q, relation ++ * futex_q -> pi_state, relation ++ * ++ * (cannot be raw because hb can contain arbitrary amount ++ * of futex_q's) ++ * ++ * pi_mutex->wait_lock: ++ * ++ * {uval, pi_state} ++ * ++ * (and pi_mutex 'obviously') ++ * ++ * p->pi_lock: ++ * ++ * p->pi_state_list -> pi_state->list, relation ++ * ++ * pi_state->refcount: ++ * ++ * pi_state lifetime ++ * ++ * ++ * Lock order: ++ * ++ * hb->lock ++ * pi_mutex->wait_lock ++ * p->pi_lock ++ * + */ + + /* +@@ -978,10 +1020,13 @@ + * the pi_state against the user space value. If correct, attach to + * it. + */ +-static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, ++static int attach_to_pi_state(u32 __user *uaddr, u32 uval, ++ struct futex_pi_state *pi_state, + struct futex_pi_state **ps) + { + pid_t pid = uval & FUTEX_TID_MASK; ++ u32 uval2; ++ int ret; + + /* + * Userspace might have messed up non-PI and PI futexes [3] +@@ -989,9 +1034,39 @@ + if (unlikely(!pi_state)) + return -EINVAL; + ++ /* ++ * We get here with hb->lock held, and having found a ++ * futex_top_waiter(). This means that futex_lock_pi() of said futex_q ++ * has dropped the hb->lock in between queue_me() and unqueue_me_pi(), ++ * which in turn means that futex_lock_pi() still has a reference on ++ * our pi_state. ++ * ++ * The waiter holding a reference on @pi_state also protects against ++ * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi() ++ * and futex_wait_requeue_pi() as it cannot go to 0 and consequently ++ * free pi_state before we can take a reference ourselves. ++ */ + WARN_ON(!atomic_read(&pi_state->refcount)); + + /* ++ * Now that we have a pi_state, we can acquire wait_lock ++ * and do the state validation. ++ */ ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ++ ++ /* ++ * Since {uval, pi_state} is serialized by wait_lock, and our current ++ * uval was read without holding it, it can have changed. Verify it ++ * still is what we expect it to be, otherwise retry the entire ++ * operation. ++ */ ++ if (get_futex_value_locked(&uval2, uaddr)) ++ goto out_efault; ++ ++ if (uval != uval2) ++ goto out_eagain; ++ ++ /* + * Handle the owner died case: + */ + if (uval & FUTEX_OWNER_DIED) { +@@ -1006,11 +1081,11 @@ + * is not 0. Inconsistent state. [5] + */ + if (pid) +- return -EINVAL; ++ goto out_einval; + /* + * Take a ref on the state and return success. [4] + */ +- goto out_state; ++ goto out_attach; + } + + /* +@@ -1022,14 +1097,14 @@ + * Take a ref on the state and return success. 
[6] + */ + if (!pid) +- goto out_state; ++ goto out_attach; + } else { + /* + * If the owner died bit is not set, then the pi_state + * must have an owner. [7] + */ + if (!pi_state->owner) +- return -EINVAL; ++ goto out_einval; + } + + /* +@@ -1038,11 +1113,29 @@ + * user space TID. [9/10] + */ + if (pid != task_pid_vnr(pi_state->owner)) +- return -EINVAL; +-out_state: +- atomic_inc(&pi_state->refcount); ++ goto out_einval; ++ ++out_attach: ++ get_pi_state(pi_state); ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + *ps = pi_state; + return 0; ++ ++out_einval: ++ ret = -EINVAL; ++ goto out_error; ++ ++out_eagain: ++ ret = -EAGAIN; ++ goto out_error; ++ ++out_efault: ++ ret = -EFAULT; ++ goto out_error; ++ ++out_error: ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); ++ return ret; + } + + /* +@@ -1093,6 +1186,9 @@ + + /* + * No existing pi state. First waiter. [2] ++ * ++ * This creates pi_state, we have hb->lock held, this means nothing can ++ * observe this state, wait_lock is irrelevant. + */ + pi_state = alloc_pi_state(); + +@@ -1117,17 +1213,18 @@ + return 0; + } + +-static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, ++static int lookup_pi_state(u32 __user *uaddr, u32 uval, ++ struct futex_hash_bucket *hb, + union futex_key *key, struct futex_pi_state **ps) + { +- struct futex_q *match = futex_top_waiter(hb, key); ++ struct futex_q *top_waiter = futex_top_waiter(hb, key); + + /* + * If there is a waiter on that futex, validate it and + * attach to the pi_state when the validation succeeds. + */ +- if (match) +- return attach_to_pi_state(uval, match->pi_state, ps); ++ if (top_waiter) ++ return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps); + + /* + * We are the first waiter - try to look up the owner based on +@@ -1146,7 +1243,7 @@ + if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))) + return -EFAULT; + +- /*If user space value changed, let the caller retry */ ++ /* If user space value changed, let the caller retry */ + return curval != uval ? -EAGAIN : 0; + } + +@@ -1174,7 +1271,7 @@ + struct task_struct *task, int set_waiters) + { + u32 uval, newval, vpid = task_pid_vnr(task); +- struct futex_q *match; ++ struct futex_q *top_waiter; + int ret; + + /* +@@ -1200,9 +1297,9 @@ + * Lookup existing state first. If it exists, try to attach to + * its pi_state. + */ +- match = futex_top_waiter(hb, key); +- if (match) +- return attach_to_pi_state(uval, match->pi_state, ps); ++ top_waiter = futex_top_waiter(hb, key); ++ if (top_waiter) ++ return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps); + + /* + * No waiter and user TID is 0. We are here because the +@@ -1283,50 +1380,45 @@ + wake_q_add(wake_q, p); + __unqueue_futex(q); + /* +- * The waiting task can free the futex_q as soon as +- * q->lock_ptr = NULL is written, without taking any locks. A +- * memory barrier is required here to prevent the following +- * store to lock_ptr from getting ahead of the plist_del. ++ * The waiting task can free the futex_q as soon as q->lock_ptr = NULL ++ * is written, without taking any locks. This is possible in the event ++ * of a spurious wakeup, for example. A memory barrier is required here ++ * to prevent the following store to lock_ptr from getting ahead of the ++ * plist_del in __unqueue_futex(). 
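++ *
++ * smp_store_release() below folds that barrier into the store itself:
++ * it is the one-line equivalent of the old smp_wmb() plus plain
++ * assignment pair it replaces.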
+ */ +- smp_wmb(); +- q->lock_ptr = NULL; ++ smp_store_release(&q->lock_ptr, NULL); + } + +-static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, +- struct futex_hash_bucket *hb) ++/* ++ * Caller must hold a reference on @pi_state. ++ */ ++static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state) + { +- struct task_struct *new_owner; +- struct futex_pi_state *pi_state = this->pi_state; + u32 uninitialized_var(curval), newval; ++ struct task_struct *new_owner; ++ bool postunlock = false; + WAKE_Q(wake_q); +- bool deboost; ++ WAKE_Q(wake_sleeper_q); + int ret = 0; + +- if (!pi_state) +- return -EINVAL; +- +- /* +- * If current does not own the pi_state then the futex is +- * inconsistent and user space fiddled with the futex value. +- */ +- if (pi_state->owner != current) +- return -EINVAL; +- +- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); ++ if (WARN_ON_ONCE(!new_owner)) { ++ /* ++ * As per the comment in futex_unlock_pi() this should not happen. ++ * ++ * When this happens, give up our locks and try again, giving ++ * the futex_lock_pi() instance time to complete, either by ++ * waiting on the rtmutex or removing itself from the futex ++ * queue. ++ */ ++ ret = -EAGAIN; ++ goto out_unlock; ++ } + + /* +- * It is possible that the next waiter (the one that brought +- * this owner to the kernel) timed out and is no longer +- * waiting on the lock. +- */ +- if (!new_owner) +- new_owner = this->task; +- +- /* +- * We pass it to the next owner. The WAITERS bit is always +- * kept enabled while there is PI state around. We cleanup the +- * owner died bit, because we are the owner. ++ * We pass it to the next owner. The WAITERS bit is always kept ++ * enabled while there is PI state around. We cleanup the owner ++ * died bit, because we are the owner. + */ + newval = FUTEX_WAITERS | task_pid_vnr(new_owner); + +@@ -1335,6 +1427,7 @@ + + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) { + ret = -EFAULT; ++ + } else if (curval != uval) { + /* + * If a unconditional UNLOCK_PI operation (user space did not +@@ -1347,10 +1440,14 @@ + else + ret = -EINVAL; + } +- if (ret) { +- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); +- return ret; +- } ++ ++ if (ret) ++ goto out_unlock; ++ ++ /* ++ * This is a point of no return; once we modify the uval there is no ++ * going back and subsequent operations must not fail. ++ */ + + raw_spin_lock(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); +@@ -1363,22 +1460,15 @@ + pi_state->owner = new_owner; + raw_spin_unlock(&new_owner->pi_lock); + ++ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q, ++ &wake_sleeper_q); ++out_unlock: + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + +- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); ++ if (postunlock) ++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); + +- /* +- * First unlock HB so the waiter does not spin on it once he got woken +- * up. Second wake up the waiter before the priority is adjusted. If we +- * deboost first (and lose our higher priority), then the task might get +- * scheduled away before the wake up can take place. +- */ +- spin_unlock(&hb->lock); +- wake_up_q(&wake_q); +- if (deboost) +- rt_mutex_adjust_prio(current); +- +- return 0; ++ return ret; + } + + /* +@@ -1824,7 +1914,7 @@ + * If that call succeeds then we have pi_state and an + * initial refcount on it. 
+ */ +- ret = lookup_pi_state(ret, hb2, &key2, &pi_state); ++ ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state); + } + + switch (ret) { +@@ -1907,7 +1997,7 @@ + * refcount on the pi_state and store the pointer in + * the futex_q object of the waiter. + */ +- atomic_inc(&pi_state->refcount); ++ get_pi_state(pi_state); + this->pi_state = pi_state; + ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, + this->rt_waiter, +@@ -1924,6 +2014,16 @@ + requeue_pi_wake_futex(this, &key2, hb2); + drop_count++; + continue; ++ } else if (ret == -EAGAIN) { ++ /* ++ * Waiter was woken by timeout or ++ * signal and has set pi_blocked_on to ++ * PI_WAKEUP_INPROGRESS before we ++ * tried to enqueue it on the rtmutex. ++ */ ++ this->pi_state = NULL; ++ put_pi_state(pi_state); ++ continue; + } else if (ret) { + /* + * rt_mutex_start_proxy_lock() detected a +@@ -2007,20 +2107,7 @@ + hb_waiters_dec(hb); + } + +-/** +- * queue_me() - Enqueue the futex_q on the futex_hash_bucket +- * @q: The futex_q to enqueue +- * @hb: The destination hash bucket +- * +- * The hb->lock must be held by the caller, and is released here. A call to +- * queue_me() is typically paired with exactly one call to unqueue_me(). The +- * exceptions involve the PI related operations, which may use unqueue_me_pi() +- * or nothing if the unqueue is done as part of the wake process and the unqueue +- * state is implicit in the state of woken task (see futex_wait_requeue_pi() for +- * an example). +- */ +-static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) +- __releases(&hb->lock) ++static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) + { + int prio; + +@@ -2037,6 +2124,24 @@ + plist_node_init(&q->list, prio); + plist_add(&q->list, &hb->chain); + q->task = current; ++} ++ ++/** ++ * queue_me() - Enqueue the futex_q on the futex_hash_bucket ++ * @q: The futex_q to enqueue ++ * @hb: The destination hash bucket ++ * ++ * The hb->lock must be held by the caller, and is released here. A call to ++ * queue_me() is typically paired with exactly one call to unqueue_me(). The ++ * exceptions involve the PI related operations, which may use unqueue_me_pi() ++ * or nothing if the unqueue is done as part of the wake process and the unqueue ++ * state is implicit in the state of woken task (see futex_wait_requeue_pi() for ++ * an example). ++ */ ++static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) ++ __releases(&hb->lock) ++{ ++ __queue_me(q, hb); + spin_unlock(&hb->lock); + } + +@@ -2123,10 +2228,13 @@ + { + u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; + struct futex_pi_state *pi_state = q->pi_state; +- struct task_struct *oldowner = pi_state->owner; + u32 uval, uninitialized_var(curval), newval; ++ struct task_struct *oldowner; + int ret; + ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ++ ++ oldowner = pi_state->owner; + /* Owner died? */ + if (!pi_state->owner) + newtid |= FUTEX_OWNER_DIED; +@@ -2134,7 +2242,8 @@ + /* + * We are here either because we stole the rtmutex from the + * previous highest priority waiter or we are the highest priority +- * waiter but failed to get the rtmutex the first time. ++ * waiter but have failed to get the rtmutex the first time. ++ * + * We have to replace the newowner TID in the user space variable. + * This must be atomic as we have to preserve the owner died bit here. + * +@@ -2142,17 +2251,16 @@ + * because we can fault here. 
Imagine swapped out pages or a fork + * that marked all the anonymous memory readonly for cow. + * +- * Modifying pi_state _before_ the user space value would +- * leave the pi_state in an inconsistent state when we fault +- * here, because we need to drop the hash bucket lock to +- * handle the fault. This might be observed in the PID check +- * in lookup_pi_state. ++ * Modifying pi_state _before_ the user space value would leave the ++ * pi_state in an inconsistent state when we fault here, because we ++ * need to drop the locks to handle the fault. This might be observed ++ * in the PID check in lookup_pi_state. + */ + retry: + if (get_futex_value_locked(&uval, uaddr)) + goto handle_fault; + +- while (1) { ++ for (;;) { + newval = (uval & FUTEX_OWNER_DIED) | newtid; + + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) +@@ -2167,47 +2275,60 @@ + * itself. + */ + if (pi_state->owner != NULL) { +- raw_spin_lock_irq(&pi_state->owner->pi_lock); ++ raw_spin_lock(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); +- raw_spin_unlock_irq(&pi_state->owner->pi_lock); ++ raw_spin_unlock(&pi_state->owner->pi_lock); + } + + pi_state->owner = newowner; + +- raw_spin_lock_irq(&newowner->pi_lock); ++ raw_spin_lock(&newowner->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &newowner->pi_state_list); +- raw_spin_unlock_irq(&newowner->pi_lock); ++ raw_spin_unlock(&newowner->pi_lock); ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); ++ + return 0; + + /* +- * To handle the page fault we need to drop the hash bucket +- * lock here. That gives the other task (either the highest priority +- * waiter itself or the task which stole the rtmutex) the +- * chance to try the fixup of the pi_state. So once we are +- * back from handling the fault we need to check the pi_state +- * after reacquiring the hash bucket lock and before trying to +- * do another fixup. When the fixup has been done already we +- * simply return. ++ * To handle the page fault we need to drop the locks here. That gives ++ * the other task (either the highest priority waiter itself or the ++ * task which stole the rtmutex) the chance to try the fixup of the ++ * pi_state. So once we are back from handling the fault we need to ++ * check the pi_state after reacquiring the locks and before trying to ++ * do another fixup. When the fixup has been done already we simply ++ * return. ++ * ++ * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely ++ * drop hb->lock since the caller owns the hb -> futex_q relation. ++ * Dropping the pi_mutex->wait_lock requires the state revalidate. + */ + handle_fault: ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + spin_unlock(q->lock_ptr); + + ret = fault_in_user_writeable(uaddr); + + spin_lock(q->lock_ptr); ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + + /* + * Check if someone else fixed it for us: + */ +- if (pi_state->owner != oldowner) +- return 0; ++ if (pi_state->owner != oldowner) { ++ ret = 0; ++ goto out_unlock; ++ } + + if (ret) +- return ret; ++ goto out_unlock; + + goto retry; ++ ++out_unlock: ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); ++ return ret; + } + + static long futex_wait_restart(struct restart_block *restart); +@@ -2229,13 +2350,16 @@ + */ + static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) + { +- struct task_struct *owner; + int ret = 0; + + if (locked) { + /* + * Got the lock. 
We might not be the anticipated owner if we
+ * did a lock-steal - fix up the PI-state in that case:
++ *
++ * We can safely read pi_state->owner without holding wait_lock
++ * because we now own the rt_mutex, only the owner will attempt
++ * to change it.
+ */
+ if (q->pi_state->owner != current)
+ ret = fixup_pi_state_owner(uaddr, q, current);
+@@ -2243,43 +2367,15 @@
+ }
+
+ /*
+- * Catch the rare case, where the lock was released when we were on the
+- * way back before we locked the hash bucket.
+- */
+- if (q->pi_state->owner == current) {
+- /*
+- * Try to get the rt_mutex now. This might fail as some other
+- * task acquired the rt_mutex after we removed ourself from the
+- * rt_mutex waiters list.
+- */
+- if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
+- locked = 1;
+- goto out;
+- }
+-
+- /*
+- * pi_state is incorrect, some other task did a lock steal and
+- * we returned due to timeout or signal without taking the
+- * rt_mutex. Too late.
+- */
+- raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
+- owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+- if (!owner)
+- owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
+- raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
+- ret = fixup_pi_state_owner(uaddr, q, owner);
+- goto out;
+- }
+-
+- /*
+ * Paranoia check. If we did not take the lock, then we should not be
+ * the owner of the rt_mutex.
+ */
+- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
++ if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
+ printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
+ "pi-state %p\n", ret,
+ q->pi_state->pi_mutex.owner,
+ q->pi_state->owner);
++ }
+
+ out:
+ return ret ? ret : locked;
+@@ -2503,6 +2599,8 @@
+ ktime_t *time, int trylock)
+ {
+ struct hrtimer_sleeper timeout, *to = NULL;
++ struct futex_pi_state *pi_state = NULL;
++ struct rt_mutex_waiter rt_waiter;
+ struct futex_hash_bucket *hb;
+ struct futex_q q = futex_q_init;
+ int res, ret;
+@@ -2555,25 +2653,77 @@
+ }
+ }
+
++ WARN_ON(!q.pi_state);
++
+ /*
+ * Only actually queue now that the atomic ops are done:
+ */
+- queue_me(&q, hb);
++ __queue_me(&q, hb);
+
+- WARN_ON(!q.pi_state);
+- /*
+- * Block on the PI mutex:
+- */
+- if (!trylock) {
+- ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
+- } else {
+- ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
++ if (trylock) {
++ ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
+ /* Fixup the trylock return value: */
+ ret = ret ? 0 : -EWOULDBLOCK;
++ goto no_block;
+ }
+
++ rt_mutex_init_waiter(&rt_waiter, false);
++
++ /*
++ * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
++ * hold it while doing rt_mutex_start_proxy(), because then it will
++ * include hb->lock in the blocking chain, even though we'll not in
++ * fact hold it while blocking. This will lead it to report -EDEADLK
++ * and BUG when futex_unlock_pi() interleaves with this.
++ *
++ * Therefore acquire wait_lock while holding hb->lock, but drop the
++ * latter before calling rt_mutex_start_proxy_lock(). This still fully
++ * serializes against futex_unlock_pi() as that does the exact same
++ * lock handoff sequence.
++ */
++ raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
++ /*
++ * The migrate_disable() here disables migration in the in_atomic() fast
++ * path which is enabled again in the following spin_unlock(). We have
++ * one migrate_disable() pending in the slow-path which is reversed
++ * after the raw_spin_unlock_irq() where we leave the atomic context. 
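++ *
++ * Concretely: the migrate_enable() inside the spin_unlock() below
++ * pairs with the migrate_disable() taken here, while the explicit
++ * migrate_enable() after the raw_spin_unlock_irq() drops the disable
++ * that was done when hb->lock was acquired.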
++ */ ++ migrate_disable(); ++ ++ spin_unlock(q.lock_ptr); ++ ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); ++ raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); ++ migrate_enable(); ++ ++ if (ret) { ++ if (ret == 1) ++ ret = 0; ++ ++ spin_lock(q.lock_ptr); ++ goto no_block; ++ } ++ ++ ++ if (unlikely(to)) ++ hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); ++ ++ ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); ++ + spin_lock(q.lock_ptr); + /* ++ * If we failed to acquire the lock (signal/timeout), we must ++ * first acquire the hb->lock before removing the lock from the ++ * rt_mutex waitqueue, such that we can keep the hb and rt_mutex ++ * wait lists consistent. ++ * ++ * In particular; it is important that futex_unlock_pi() can not ++ * observe this inconsistency. ++ */ ++ if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter)) ++ ret = 0; ++ ++no_block: ++ /* + * Fixup the pi_state owner and possibly acquire the lock if we + * haven't already. + */ +@@ -2589,12 +2739,19 @@ + * If fixup_owner() faulted and was unable to handle the fault, unlock + * it and return the fault to userspace. + */ +- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) +- rt_mutex_unlock(&q.pi_state->pi_mutex); ++ if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) { ++ pi_state = q.pi_state; ++ get_pi_state(pi_state); ++ } + + /* Unqueue and drop the lock */ + unqueue_me_pi(&q); + ++ if (pi_state) { ++ rt_mutex_futex_unlock(&pi_state->pi_mutex); ++ put_pi_state(pi_state); ++ } ++ + goto out_put_key; + + out_unlock_put_key: +@@ -2603,8 +2760,10 @@ + out_put_key: + put_futex_key(&q.key); + out: +- if (to) ++ if (to) { ++ hrtimer_cancel(&to->timer); + destroy_hrtimer_on_stack(&to->timer); ++ } + return ret != -EINTR ? ret : -ERESTARTNOINTR; + + uaddr_faulted: +@@ -2631,7 +2790,7 @@ + u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current); + union futex_key key = FUTEX_KEY_INIT; + struct futex_hash_bucket *hb; +- struct futex_q *match; ++ struct futex_q *top_waiter; + int ret; + + retry: +@@ -2655,12 +2814,48 @@ + * all and we at least want to know if user space fiddled + * with the futex value instead of blindly unlocking. + */ +- match = futex_top_waiter(hb, &key); +- if (match) { +- ret = wake_futex_pi(uaddr, uval, match, hb); ++ top_waiter = futex_top_waiter(hb, &key); ++ if (top_waiter) { ++ struct futex_pi_state *pi_state = top_waiter->pi_state; ++ ++ ret = -EINVAL; ++ if (!pi_state) ++ goto out_unlock; ++ + /* +- * In case of success wake_futex_pi dropped the hash +- * bucket lock. ++ * If current does not own the pi_state then the futex is ++ * inconsistent and user space fiddled with the futex value. ++ */ ++ if (pi_state->owner != current) ++ goto out_unlock; ++ ++ get_pi_state(pi_state); ++ /* ++ * By taking wait_lock while still holding hb->lock, we ensure ++ * there is no point where we hold neither; and therefore ++ * wake_futex_pi() must observe a state consistent with what we ++ * observed. ++ */ ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ++ /* ++ * Magic trickery for now to make the RT migrate disable ++ * logic happy. The following spin_unlock() happens with ++ * interrupts disabled so the internal migrate_enable() ++ * won't undo the migrate_disable() which was issued when ++ * locking hb->lock. 
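++ *
++ * This mirrors the dance in futex_lock_pi(): the extra
++ * migrate_disable() keeps the migrate counter elevated across the
++ * irq-off region, and the matching migrate_enable() below drops it
++ * again once it is safe to do so.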
++ */ ++ migrate_disable(); ++ spin_unlock(&hb->lock); ++ ++ /* Drops pi_state->pi_mutex.wait_lock */ ++ ret = wake_futex_pi(uaddr, uval, pi_state); ++ ++ migrate_enable(); ++ ++ put_pi_state(pi_state); ++ ++ /* ++ * Success, we're done! No tricky corner cases. + */ + if (!ret) + goto out_putkey; +@@ -2675,7 +2870,6 @@ + * setting the FUTEX_WAITERS bit. Try again. + */ + if (ret == -EAGAIN) { +- spin_unlock(&hb->lock); + put_futex_key(&key); + goto retry; + } +@@ -2683,7 +2877,7 @@ + * wake_futex_pi has detected invalid state. Tell user + * space. + */ +- goto out_unlock; ++ goto out_putkey; + } + + /* +@@ -2693,8 +2887,10 @@ + * preserve the WAITERS bit not the OWNER_DIED one. We are the + * owner. + */ +- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) ++ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) { ++ spin_unlock(&hb->lock); + goto pi_faulted; ++ } + + /* + * If uval has changed, let user space handle it. +@@ -2708,7 +2904,6 @@ + return ret; + + pi_faulted: +- spin_unlock(&hb->lock); + put_futex_key(&key); + + ret = fault_in_user_writeable(uaddr); +@@ -2812,8 +3007,9 @@ + u32 __user *uaddr2) + { + struct hrtimer_sleeper timeout, *to = NULL; ++ struct futex_pi_state *pi_state = NULL; + struct rt_mutex_waiter rt_waiter; +- struct futex_hash_bucket *hb; ++ struct futex_hash_bucket *hb, *hb2; + union futex_key key2 = FUTEX_KEY_INIT; + struct futex_q q = futex_q_init; + int res, ret; +@@ -2838,10 +3034,7 @@ + * The waiter is allocated on our stack, manipulated by the requeue + * code while we sleep on uaddr. + */ +- debug_rt_mutex_init_waiter(&rt_waiter); +- RB_CLEAR_NODE(&rt_waiter.pi_tree_entry); +- RB_CLEAR_NODE(&rt_waiter.tree_entry); +- rt_waiter.task = NULL; ++ rt_mutex_init_waiter(&rt_waiter, false); + + ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); + if (unlikely(ret != 0)) +@@ -2872,20 +3065,55 @@ + /* Queue the futex_q, drop the hb lock, wait for wakeup. */ + futex_wait_queue_me(hb, &q, to); + +- spin_lock(&hb->lock); +- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); +- spin_unlock(&hb->lock); +- if (ret) +- goto out_put_keys; ++ /* ++ * On RT we must avoid races with requeue and trying to block ++ * on two mutexes (hb->lock and uaddr2's rtmutex) by ++ * serializing access to pi_blocked_on with pi_lock. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ if (current->pi_blocked_on) { ++ /* ++ * We have been requeued or are in the process of ++ * being requeued. ++ */ ++ raw_spin_unlock_irq(¤t->pi_lock); ++ } else { ++ /* ++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS ++ * prevents a concurrent requeue from moving us to the ++ * uaddr2 rtmutex. After that we can safely acquire ++ * (and possibly block on) hb->lock. ++ */ ++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ spin_lock(&hb->lock); ++ ++ /* ++ * Clean up pi_blocked_on. We might leak it otherwise ++ * when we succeeded with the hb->lock in the fast ++ * path. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ current->pi_blocked_on = NULL; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); ++ spin_unlock(&hb->lock); ++ if (ret) ++ goto out_put_keys; ++ } + + /* +- * In order for us to be here, we know our q.key == key2, and since +- * we took the hb->lock above, we also know that futex_requeue() has +- * completed and we no longer have to concern ourselves with a wakeup +- * race with the atomic proxy lock acquisition by the requeue code. 
The +- * futex_requeue dropped our key1 reference and incremented our key2 +- * reference count. ++ * In order to be here, we have either been requeued, are in ++ * the process of being requeued, or requeue successfully ++ * acquired uaddr2 on our behalf. If pi_blocked_on was ++ * non-null above, we may be racing with a requeue. Do not ++ * rely on q->lock_ptr to be hb2->lock until after blocking on ++ * hb->lock or hb2->lock. The futex_requeue dropped our key1 ++ * reference and incremented our key2 reference count. + */ ++ hb2 = hash_futex(&key2); + + /* Check if the requeue code acquired the second futex for us. */ + if (!q.rt_waiter) { +@@ -2894,16 +3122,19 @@ + * did a lock-steal - fix up the PI-state in that case. + */ + if (q.pi_state && (q.pi_state->owner != current)) { +- spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); +- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) +- rt_mutex_unlock(&q.pi_state->pi_mutex); ++ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { ++ pi_state = q.pi_state; ++ get_pi_state(pi_state); ++ } + /* + * Drop the reference to the pi state which + * the requeue_pi() code acquired for us. + */ + put_pi_state(q.pi_state); +- spin_unlock(q.lock_ptr); ++ spin_unlock(&hb2->lock); + } + } else { + struct rt_mutex *pi_mutex; +@@ -2915,10 +3146,14 @@ + */ + WARN_ON(!q.pi_state); + pi_mutex = &q.pi_state->pi_mutex; +- ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter); +- debug_rt_mutex_free_waiter(&rt_waiter); ++ ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); + +- spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); ++ if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) ++ ret = 0; ++ ++ debug_rt_mutex_free_waiter(&rt_waiter); + /* + * Fixup the pi_state owner and possibly acquire the lock if we + * haven't already. +@@ -2936,13 +3171,20 @@ + * the fault, unlock the rt_mutex and return the fault to + * userspace. + */ +- if (ret && rt_mutex_owner(pi_mutex) == current) +- rt_mutex_unlock(pi_mutex); ++ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { ++ pi_state = q.pi_state; ++ get_pi_state(pi_state); ++ } + + /* Unqueue and drop the lock. */ + unqueue_me_pi(&q); + } + ++ if (pi_state) { ++ rt_mutex_futex_unlock(&pi_state->pi_mutex); ++ put_pi_state(pi_state); ++ } ++ + if (ret == -EINTR) { + /* + * We've already been requeued, but cannot restart by calling +diff -Nur linux-4.9.28.orig/kernel/irq/handle.c linux-4.9.28/kernel/irq/handle.c +--- linux-4.9.28.orig/kernel/irq/handle.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/irq/handle.c 2017-05-19 03:37:25.186176955 +0200 +@@ -181,10 +181,16 @@ + { + irqreturn_t retval; + unsigned int flags = 0; ++ struct pt_regs *regs = get_irq_regs(); ++ u64 ip = regs ? 
instruction_pointer(regs) : 0; + + retval = __handle_irq_event_percpu(desc, &flags); + +- add_interrupt_randomness(desc->irq_data.irq, flags); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ desc->random_ip = ip; ++#else ++ add_interrupt_randomness(desc->irq_data.irq, flags, ip); ++#endif + + if (!noirqdebug) + note_interrupt(desc, retval); +diff -Nur linux-4.9.28.orig/kernel/irq/manage.c linux-4.9.28/kernel/irq/manage.c +--- linux-4.9.28.orig/kernel/irq/manage.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/irq/manage.c 2017-05-19 03:37:25.186176955 +0200 +@@ -22,6 +22,7 @@ + #include "internals.h" + + #ifdef CONFIG_IRQ_FORCED_THREADING ++# ifndef CONFIG_PREEMPT_RT_BASE + __read_mostly bool force_irqthreads; + + static int __init setup_forced_irqthreads(char *arg) +@@ -30,6 +31,7 @@ + return 0; + } + early_param("threadirqs", setup_forced_irqthreads); ++# endif + #endif + + static void __synchronize_hardirq(struct irq_desc *desc) +@@ -233,7 +235,12 @@ + + if (desc->affinity_notify) { + kref_get(&desc->affinity_notify->kref); ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ swork_queue(&desc->affinity_notify->swork); ++#else + schedule_work(&desc->affinity_notify->work); ++#endif + } + irqd_set(data, IRQD_AFFINITY_SET); + +@@ -271,10 +278,8 @@ + } + EXPORT_SYMBOL_GPL(irq_set_affinity_hint); + +-static void irq_affinity_notify(struct work_struct *work) ++static void _irq_affinity_notify(struct irq_affinity_notify *notify) + { +- struct irq_affinity_notify *notify = +- container_of(work, struct irq_affinity_notify, work); + struct irq_desc *desc = irq_to_desc(notify->irq); + cpumask_var_t cpumask; + unsigned long flags; +@@ -296,6 +301,35 @@ + kref_put(¬ify->kref, notify->release); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static void init_helper_thread(void) ++{ ++ static int init_sworker_once; ++ ++ if (init_sworker_once) ++ return; ++ if (WARN_ON(swork_get())) ++ return; ++ init_sworker_once = 1; ++} ++ ++static void irq_affinity_notify(struct swork_event *swork) ++{ ++ struct irq_affinity_notify *notify = ++ container_of(swork, struct irq_affinity_notify, swork); ++ _irq_affinity_notify(notify); ++} ++ ++#else ++ ++static void irq_affinity_notify(struct work_struct *work) ++{ ++ struct irq_affinity_notify *notify = ++ container_of(work, struct irq_affinity_notify, work); ++ _irq_affinity_notify(notify); ++} ++#endif ++ + /** + * irq_set_affinity_notifier - control notification of IRQ affinity changes + * @irq: Interrupt for which to enable/disable notification +@@ -324,7 +358,12 @@ + if (notify) { + notify->irq = irq; + kref_init(¬ify->kref); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ INIT_SWORK(¬ify->swork, irq_affinity_notify); ++ init_helper_thread(); ++#else + INIT_WORK(¬ify->work, irq_affinity_notify); ++#endif + } + + raw_spin_lock_irqsave(&desc->lock, flags); +@@ -879,7 +918,15 @@ + local_bh_disable(); + ret = action->thread_fn(action->irq, action->dev_id); + irq_finalize_oneshot(desc, action); +- local_bh_enable(); ++ /* ++ * Interrupts which have real time requirements can be set up ++ * to avoid softirq processing in the thread handler. This is ++ * safe as these interrupts do not raise soft interrupts. 
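++ *
++ * Handlers opt in by passing IRQF_NO_SOFTIRQ_CALL to
++ * request_threaded_irq(); __setup_irq() translates that flag into
++ * the per-descriptor state queried here.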
++ */ ++ if (irq_settings_no_softirq_call(desc)) ++ _local_bh_enable(); ++ else ++ local_bh_enable(); + return ret; + } + +@@ -976,6 +1023,12 @@ + if (action_ret == IRQ_WAKE_THREAD) + irq_wake_secondary(desc, action); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ migrate_disable(); ++ add_interrupt_randomness(action->irq, 0, ++ desc->random_ip ^ (unsigned long) action); ++ migrate_enable(); ++#endif + wake_threads_waitq(desc); + } + +@@ -1336,6 +1389,9 @@ + irqd_set(&desc->irq_data, IRQD_NO_BALANCING); + } + ++ if (new->flags & IRQF_NO_SOFTIRQ_CALL) ++ irq_settings_set_no_softirq_call(desc); ++ + /* Set default affinity mask once everything is setup */ + setup_affinity(desc, mask); + +@@ -2061,7 +2117,7 @@ + * This call sets the internal irqchip state of an interrupt, + * depending on the value of @which. + * +- * This function should be called with preemption disabled if the ++ * This function should be called with migration disabled if the + * interrupt controller has per-cpu registers. + */ + int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, +diff -Nur linux-4.9.28.orig/kernel/irq/settings.h linux-4.9.28/kernel/irq/settings.h +--- linux-4.9.28.orig/kernel/irq/settings.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/irq/settings.h 2017-05-19 03:37:25.186176955 +0200 +@@ -16,6 +16,7 @@ + _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, + _IRQ_IS_POLLED = IRQ_IS_POLLED, + _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, ++ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, + _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, + }; + +@@ -30,6 +31,7 @@ + #define IRQ_PER_CPU_DEVID GOT_YOU_MORON + #define IRQ_IS_POLLED GOT_YOU_MORON + #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON ++#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON + #undef IRQF_MODIFY_MASK + #define IRQF_MODIFY_MASK GOT_YOU_MORON + +@@ -40,6 +42,16 @@ + desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); + } + ++static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) ++{ ++ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; ++} ++ ++static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) ++{ ++ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; ++} ++ + static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) + { + return desc->status_use_accessors & _IRQ_PER_CPU; +diff -Nur linux-4.9.28.orig/kernel/irq/spurious.c linux-4.9.28/kernel/irq/spurious.c +--- linux-4.9.28.orig/kernel/irq/spurious.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/irq/spurious.c 2017-05-19 03:37:25.186176955 +0200 +@@ -442,6 +442,10 @@ + + static int __init irqfixup_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); ++ return 1; ++#endif + irqfixup = 1; + printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); + printk(KERN_WARNING "This may impact system performance.\n"); +@@ -454,6 +458,10 @@ + + static int __init irqpoll_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); ++ return 1; ++#endif + irqfixup = 2; + printk(KERN_WARNING "Misrouted IRQ fixup and polling support " + "enabled\n"); +diff -Nur linux-4.9.28.orig/kernel/irq_work.c linux-4.9.28/kernel/irq_work.c +--- linux-4.9.28.orig/kernel/irq_work.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/irq_work.c 2017-05-19 03:37:25.186176955 +0200 +@@ -17,6 +17,7 @@ + #include <linux/cpu.h> + #include <linux/notifier.h> + #include <linux/smp.h> ++#include 
<linux/interrupt.h> + #include <asm/processor.h> + + +@@ -65,6 +66,8 @@ + */ + bool irq_work_queue_on(struct irq_work *work, int cpu) + { ++ struct llist_head *list; ++ + /* All work should have been flushed before going offline */ + WARN_ON_ONCE(cpu_is_offline(cpu)); + +@@ -75,7 +78,12 @@ + if (!irq_work_claim(work)) + return false; + +- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) ++ list = &per_cpu(lazy_list, cpu); ++ else ++ list = &per_cpu(raised_list, cpu); ++ ++ if (llist_add(&work->llnode, list)) + arch_send_call_function_single_ipi(cpu); + + return true; +@@ -86,6 +94,9 @@ + /* Enqueue the irq work @work on the current CPU */ + bool irq_work_queue(struct irq_work *work) + { ++ struct llist_head *list; ++ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); ++ + /* Only queue if not already pending */ + if (!irq_work_claim(work)) + return false; +@@ -93,13 +104,15 @@ + /* Queue the entry and raise the IPI if needed. */ + preempt_disable(); + +- /* If the work is "lazy", handle it from next tick if any */ +- if (work->flags & IRQ_WORK_LAZY) { +- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && +- tick_nohz_tick_stopped()) +- arch_irq_work_raise(); +- } else { +- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) ++ lazy_work = work->flags & IRQ_WORK_LAZY; ++ ++ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ))) ++ list = this_cpu_ptr(&lazy_list); ++ else ++ list = this_cpu_ptr(&raised_list); ++ ++ if (llist_add(&work->llnode, list)) { ++ if (!lazy_work || tick_nohz_tick_stopped()) + arch_irq_work_raise(); + } + +@@ -116,9 +129,8 @@ + raised = this_cpu_ptr(&raised_list); + lazy = this_cpu_ptr(&lazy_list); + +- if (llist_empty(raised) || arch_irq_work_has_interrupt()) +- if (llist_empty(lazy)) +- return false; ++ if (llist_empty(raised) && llist_empty(lazy)) ++ return false; + + /* All work should have been flushed before going offline */ + WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); +@@ -132,7 +144,7 @@ + struct irq_work *work; + struct llist_node *llnode; + +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + + if (llist_empty(list)) + return; +@@ -169,7 +181,16 @@ + void irq_work_run(void) + { + irq_work_run_list(this_cpu_ptr(&raised_list)); +- irq_work_run_list(this_cpu_ptr(&lazy_list)); ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) { ++ /* ++ * NOTE: we raise softirq via IPI for safety, ++ * and execute in irq_work_tick() to move the ++ * overhead from hard to soft irq context. 
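++ *
++ * irq_work_tick_soft() below then drains the lazy list once the
++ * TIMER_SOFTIRQ raised here runs; on RT that softirq executes in
++ * thread context, which is the point of the detour.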
++ */ ++ if (!llist_empty(this_cpu_ptr(&lazy_list))) ++ raise_softirq(TIMER_SOFTIRQ); ++ } else ++ irq_work_run_list(this_cpu_ptr(&lazy_list)); + } + EXPORT_SYMBOL_GPL(irq_work_run); + +@@ -179,8 +200,17 @@ + + if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) + irq_work_run_list(raised); ++ ++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) ++ irq_work_run_list(this_cpu_ptr(&lazy_list)); ++} ++ ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) ++void irq_work_tick_soft(void) ++{ + irq_work_run_list(this_cpu_ptr(&lazy_list)); + } ++#endif + + /* + * Synchronize against the irq_work @entry, ensures the entry is not +diff -Nur linux-4.9.28.orig/kernel/Kconfig.locks linux-4.9.28/kernel/Kconfig.locks +--- linux-4.9.28.orig/kernel/Kconfig.locks 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/Kconfig.locks 2017-05-19 03:37:25.178176648 +0200 +@@ -225,11 +225,11 @@ + + config MUTEX_SPIN_ON_OWNER + def_bool y +- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW ++ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL + + config RWSEM_SPIN_ON_OWNER + def_bool y +- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW ++ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL + + config LOCK_SPIN_ON_OWNER + def_bool y +diff -Nur linux-4.9.28.orig/kernel/Kconfig.preempt linux-4.9.28/kernel/Kconfig.preempt +--- linux-4.9.28.orig/kernel/Kconfig.preempt 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/Kconfig.preempt 2017-05-19 03:37:25.178176648 +0200 +@@ -1,3 +1,16 @@ ++config PREEMPT ++ bool ++ select PREEMPT_COUNT ++ ++config PREEMPT_RT_BASE ++ bool ++ select PREEMPT ++ ++config HAVE_PREEMPT_LAZY ++ bool ++ ++config PREEMPT_LAZY ++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL + + choice + prompt "Preemption Model" +@@ -33,9 +46,9 @@ + + Select this if you are building a kernel for a desktop system. + +-config PREEMPT ++config PREEMPT__LL + bool "Preemptible Kernel (Low-Latency Desktop)" +- select PREEMPT_COUNT ++ select PREEMPT + select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK + help + This option reduces the latency of the kernel by making +@@ -52,6 +65,22 @@ + embedded system with latency requirements in the milliseconds + range. + ++config PREEMPT_RTB ++ bool "Preemptible Kernel (Basic RT)" ++ select PREEMPT_RT_BASE ++ help ++ This option is basically the same as (Low-Latency Desktop) but ++ enables changes which are preliminary for the full preemptible ++ RT kernel. 
++ ++config PREEMPT_RT_FULL ++ bool "Fully Preemptible Kernel (RT)" ++ depends on IRQ_FORCED_THREADING ++ select PREEMPT_RT_BASE ++ select PREEMPT_RCU ++ help ++ All and everything ++ + endchoice + + config PREEMPT_COUNT +diff -Nur linux-4.9.28.orig/kernel/ksysfs.c linux-4.9.28/kernel/ksysfs.c +--- linux-4.9.28.orig/kernel/ksysfs.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/ksysfs.c 2017-05-19 03:37:25.186176955 +0200 +@@ -136,6 +136,15 @@ + + #endif /* CONFIG_KEXEC_CORE */ + ++#if defined(CONFIG_PREEMPT_RT_FULL) ++static ssize_t realtime_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%d\n", 1); ++} ++KERNEL_ATTR_RO(realtime); ++#endif ++ + /* whether file capabilities are enabled */ + static ssize_t fscaps_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +@@ -225,6 +234,9 @@ + &rcu_expedited_attr.attr, + &rcu_normal_attr.attr, + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ &realtime_attr.attr, ++#endif + NULL + }; + +diff -Nur linux-4.9.28.orig/kernel/locking/lockdep.c linux-4.9.28/kernel/locking/lockdep.c +--- linux-4.9.28.orig/kernel/locking/lockdep.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/locking/lockdep.c 2017-05-19 03:37:25.186176955 +0200 +@@ -658,6 +658,7 @@ + struct lockdep_subclass_key *key; + struct hlist_head *hash_head; + struct lock_class *class; ++ bool is_static = false; + + if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { + debug_locks_off(); +@@ -671,10 +672,23 @@ + + /* + * Static locks do not have their class-keys yet - for them the key +- * is the lock object itself: +- */ +- if (unlikely(!lock->key)) +- lock->key = (void *)lock; ++ * is the lock object itself. If the lock is in the per cpu area, ++ * the canonical address of the lock (per cpu offset removed) is ++ * used. ++ */ ++ if (unlikely(!lock->key)) { ++ unsigned long can_addr, addr = (unsigned long)lock; ++ ++ if (__is_kernel_percpu_address(addr, &can_addr)) ++ lock->key = (void *)can_addr; ++ else if (__is_module_percpu_address(addr, &can_addr)) ++ lock->key = (void *)can_addr; ++ else if (static_obj(lock)) ++ lock->key = (void *)lock; ++ else ++ return ERR_PTR(-EINVAL); ++ is_static = true; ++ } + + /* + * NOTE: the class-key must be unique. For dynamic locks, a static +@@ -706,7 +720,7 @@ + } + } + +- return NULL; ++ return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL); + } + + /* +@@ -724,19 +738,18 @@ + DEBUG_LOCKS_WARN_ON(!irqs_disabled()); + + class = look_up_lock_class(lock, subclass); +- if (likely(class)) ++ if (likely(!IS_ERR_OR_NULL(class))) + goto out_set_class_cache; + + /* + * Debug-check: all keys must be persistent! +- */ +- if (!static_obj(lock->key)) { ++ */ ++ if (IS_ERR(class)) { + debug_locks_off(); + printk("INFO: trying to register non-static key.\n"); + printk("the code is fine but needs lockdep annotation.\n"); + printk("turning off the locking correctness validator.\n"); + dump_stack(); +- + return NULL; + } + +@@ -3410,7 +3423,7 @@ + * Clearly if the lock hasn't been acquired _ever_, we're not + * holding it either, so report failure. + */ +- if (!class) ++ if (IS_ERR_OR_NULL(class)) + return 0; + + /* +@@ -3689,6 +3702,7 @@ + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * We dont accurately track softirq state in e.g. 
+ * hardirq contexts (such as on 4KSTACKS), so only +@@ -3703,6 +3717,7 @@ + DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); + } + } ++#endif + + if (!debug_locks) + print_irqtrace_events(current); +@@ -4159,7 +4174,7 @@ + * If the class exists we look it up and zap it: + */ + class = look_up_lock_class(lock, j); +- if (class) ++ if (!IS_ERR_OR_NULL(class)) + zap_class(class); + } + /* +diff -Nur linux-4.9.28.orig/kernel/locking/locktorture.c linux-4.9.28/kernel/locking/locktorture.c +--- linux-4.9.28.orig/kernel/locking/locktorture.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/locking/locktorture.c 2017-05-19 03:37:25.186176955 +0200 +@@ -26,7 +26,6 @@ + #include <linux/kthread.h> + #include <linux/sched/rt.h> + #include <linux/spinlock.h> +-#include <linux/rwlock.h> + #include <linux/mutex.h> + #include <linux/rwsem.h> + #include <linux/smp.h> +diff -Nur linux-4.9.28.orig/kernel/locking/Makefile linux-4.9.28/kernel/locking/Makefile +--- linux-4.9.28.orig/kernel/locking/Makefile 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/locking/Makefile 2017-05-19 03:37:25.186176955 +0200 +@@ -2,7 +2,7 @@ + # and is generally not a function of system call inputs. + KCOV_INSTRUMENT := n + +-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o ++obj-y += semaphore.o percpu-rwsem.o + + ifdef CONFIG_FUNCTION_TRACER + CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) +@@ -11,7 +11,11 @@ + CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE) + endif + ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) ++obj-y += mutex.o + obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o ++endif ++obj-y += rwsem.o + obj-$(CONFIG_LOCKDEP) += lockdep.o + ifeq ($(CONFIG_PROC_FS),y) + obj-$(CONFIG_LOCKDEP) += lockdep_proc.o +@@ -24,7 +28,10 @@ + obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o + obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o + obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) + obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o + obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o ++endif ++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o rwsem-rt.o + obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o + obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o +diff -Nur linux-4.9.28.orig/kernel/locking/percpu-rwsem.c linux-4.9.28/kernel/locking/percpu-rwsem.c +--- linux-4.9.28.orig/kernel/locking/percpu-rwsem.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/locking/percpu-rwsem.c 2017-05-19 03:37:25.186176955 +0200 +@@ -18,7 +18,7 @@ + /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */ + rcu_sync_init(&sem->rss, RCU_SCHED_SYNC); + __init_rwsem(&sem->rw_sem, name, rwsem_key); +- init_waitqueue_head(&sem->writer); ++ init_swait_queue_head(&sem->writer); + sem->readers_block = 0; + return 0; + } +@@ -103,7 +103,7 @@ + __this_cpu_dec(*sem->read_count); + + /* Prod writer to recheck readers_active */ +- wake_up(&sem->writer); ++ swake_up(&sem->writer); + } + EXPORT_SYMBOL_GPL(__percpu_up_read); + +@@ -160,7 +160,7 @@ + */ + + /* Wait for all now active readers to complete. 
*/
+- wait_event(sem->writer, readers_active_check(sem));
++ swait_event(sem->writer, readers_active_check(sem));
+ }
+ EXPORT_SYMBOL_GPL(percpu_down_write);
+
+diff -Nur linux-4.9.28.orig/kernel/locking/rt.c linux-4.9.28/kernel/locking/rt.c
+--- linux-4.9.28.orig/kernel/locking/rt.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.9.28/kernel/locking/rt.c 2017-05-19 03:37:25.186176955 +0200
+@@ -0,0 +1,331 @@
++/*
++ * kernel/rt.c
++ *
++ * Real-Time Preemption Support
++ *
++ * started by Ingo Molnar:
++ *
++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
++ *
++ * historic credit for proving that Linux spinlocks can be implemented via
++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
++ * and others) who prototyped it on 2.4 and did lots of comparative
++ * research and analysis; TimeSys, for proving that you can implement a
++ * fully preemptible kernel via the use of IRQ threading and mutexes;
++ * Bill Huey for persuasively arguing on lkml that the mutex model is the
++ * right one; and to MontaVista, who ported pmutexes to 2.6.
++ *
++ * This code is a from-scratch implementation and is not based on pmutexes,
++ * but the idea of converting spinlocks to mutexes is used here too.
++ *
++ * lock debugging, locking tree, deadlock detection:
++ *
++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
++ * Released under the General Public License (GPL).
++ *
++ * Includes portions of the generic R/W semaphore implementation from:
++ *
++ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
++ * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
++ * - Derived also from comments by Linus
++ *
++ * Pending ownership of locks and ownership stealing:
++ *
++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
++ *
++ * (also by Steven Rostedt)
++ * - Converted single pi_lock to individual task locks.
++ *
++ * By Esben Nielsen:
++ * Doing priority inheritance with help of the scheduler.
++ *
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
++ * - major rework based on Esben Nielsen's initial patch
++ * - replaced thread_info references by task_struct refs
++ * - removed task->pending_owner dependency
++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
++ * in the scheduler return path as discussed with Steven Rostedt
++ *
++ * Copyright (C) 2006, Kihon Technologies Inc.
++ * Steven Rostedt <rostedt@goodmis.org>
++ * - debugged and patched Thomas Gleixner's rework.
++ * - added back the cmpxchg to the rework.
++ * - turned atomic require back on for SMP.
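The rwlock_t functions added below in kernel/locking/rt.c (rt_read_lock() and friends) reduce an rwlock to a single rt_mutex plus a read_depth counter: the owning task may re-take the read side, and only one reader task holds the lock at a time. A userspace model of just that accounting, with a pthread mutex standing in for the rt_mutex; all names are illustrative, and the PI machinery is not modelled:

#include <pthread.h>
#include <stdio.h>

/*
 * Illustrative single-task model, not kernel code and not itself
 * thread-safe: only the "owner == current task" test and the
 * read_depth accounting of the rt rwlock are mirrored.
 */
struct rt_rwlock_model {
	pthread_mutex_t lock;
	pthread_t owner;
	int read_depth;
};

static void model_read_lock(struct rt_rwlock_model *rw)
{
	if (!rw->read_depth || !pthread_equal(rw->owner, pthread_self())) {
		pthread_mutex_lock(&rw->lock);	/* first read takes the mutex */
		rw->owner = pthread_self();
	}
	rw->read_depth++;		/* recursive reads only bump the depth */
}

static void model_read_unlock(struct rt_rwlock_model *rw)
{
	if (--rw->read_depth == 0)
		pthread_mutex_unlock(&rw->lock); /* last unlock drops the mutex */
}

int main(void)
{
	struct rt_rwlock_model rw = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	model_read_lock(&rw);
	model_read_lock(&rw);		/* same task: succeeds, depth == 2 */
	printf("read_depth = %d\n", rw.read_depth);
	model_read_unlock(&rw);
	model_read_unlock(&rw);
	return 0;
}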
++ */ ++ ++#include <linux/spinlock.h> ++#include <linux/rtmutex.h> ++#include <linux/sched.h> ++#include <linux/delay.h> ++#include <linux/module.h> ++#include <linux/kallsyms.h> ++#include <linux/syscalls.h> ++#include <linux/interrupt.h> ++#include <linux/plist.h> ++#include <linux/fs.h> ++#include <linux/futex.h> ++#include <linux/hrtimer.h> ++ ++#include "rtmutex_common.h" ++ ++/* ++ * struct mutex functions ++ */ ++void __mutex_do_init(struct mutex *mutex, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); ++ lockdep_init_map(&mutex->dep_map, name, key, 0); ++#endif ++ mutex->lock.save_state = 0; ++} ++EXPORT_SYMBOL(__mutex_do_init); ++ ++void __lockfunc _mutex_lock(struct mutex *lock) ++{ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock); ++ ++int __lockfunc _mutex_lock_interruptible(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_lock_interruptible(&lock->lock); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible); ++ ++int __lockfunc _mutex_lock_killable(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_lock_killable(&lock->lock); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable); ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) ++{ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock_nested); ++ ++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) ++{ ++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock_nest_lock); ++ ++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ ret = rt_mutex_lock_interruptible(&lock->lock); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible_nested); ++ ++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++ ret = rt_mutex_lock_killable(&lock->lock); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable_nested); ++#endif ++ ++int __lockfunc _mutex_trylock(struct mutex *lock) ++{ ++ int ret = rt_mutex_trylock(&lock->lock); ++ ++ if (ret) ++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_trylock); ++ ++void __lockfunc _mutex_unlock(struct mutex *lock) ++{ ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ rt_mutex_unlock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_unlock); ++ ++/* ++ * rwlock_t functions ++ */ ++int __lockfunc rt_write_trylock(rwlock_t *rwlock) ++{ ++ int ret; ++ ++ migrate_disable(); ++ ret = rt_mutex_trylock(&rwlock->lock); ++ if (ret) ++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock); ++ ++int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long 
*flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ ret = rt_write_trylock(rwlock); ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock_irqsave); ++ ++int __lockfunc rt_read_trylock(rwlock_t *rwlock) ++{ ++ struct rt_mutex *lock = &rwlock->lock; ++ int ret = 1; ++ ++ /* ++ * recursive read locks succeed when current owns the lock, ++ * but not when read_depth == 0 which means that the lock is ++ * write locked. ++ */ ++ if (rt_mutex_owner(lock) != current) { ++ migrate_disable(); ++ ret = rt_mutex_trylock(lock); ++ if (ret) ++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ ++ } else if (!rwlock->read_depth) { ++ ret = 0; ++ } ++ ++ if (ret) ++ rwlock->read_depth++; ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_read_trylock); ++ ++void __lockfunc rt_write_lock(rwlock_t *rwlock) ++{ ++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); ++ __rt_spin_lock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_write_lock); ++ ++void __lockfunc rt_read_lock(rwlock_t *rwlock) ++{ ++ struct rt_mutex *lock = &rwlock->lock; ++ ++ ++ /* ++ * recursive read locks succeed when current owns the lock ++ */ ++ if (rt_mutex_owner(lock) != current) { ++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); ++ __rt_spin_lock(lock); ++ } ++ rwlock->read_depth++; ++} ++ ++EXPORT_SYMBOL(rt_read_lock); ++ ++void __lockfunc rt_write_unlock(rwlock_t *rwlock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ __rt_spin_unlock(&rwlock->lock); ++ migrate_enable(); ++} ++EXPORT_SYMBOL(rt_write_unlock); ++ ++void __lockfunc rt_read_unlock(rwlock_t *rwlock) ++{ ++ /* Release the lock only when read_depth is down to 0 */ ++ if (--rwlock->read_depth == 0) { ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ __rt_spin_unlock(&rwlock->lock); ++ migrate_enable(); ++ } ++} ++EXPORT_SYMBOL(rt_read_unlock); ++ ++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) ++{ ++ rt_write_lock(rwlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(rt_write_lock_irqsave); ++ ++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) ++{ ++ rt_read_lock(rwlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(rt_read_lock_irqsave); ++ ++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); ++ lockdep_init_map(&rwlock->dep_map, name, key, 0); ++#endif ++ rwlock->lock.save_state = 1; ++ rwlock->read_depth = 0; ++} ++EXPORT_SYMBOL(__rt_rwlock_init); ++ ++/** ++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 ++ * @cnt: the atomic which we are to dec ++ * @lock: the mutex to return holding if we dec to 0 ++ * ++ * return true and hold lock if we dec to 0, return false otherwise ++ */ ++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) ++{ ++ /* dec if we can't possibly hit 0 */ ++ if (atomic_add_unless(cnt, -1, 1)) ++ return 0; ++ /* we might hit 0, so take the lock */ ++ mutex_lock(lock); ++ if (!atomic_dec_and_test(cnt)) { ++ /* when we actually did the dec, we didn't hit 0 */ ++ mutex_unlock(lock); ++ return 0; ++ } ++ /* we hit 0, and we hold the lock */ ++ return 1; ++} ++EXPORT_SYMBOL(atomic_dec_and_mutex_lock); +diff -Nur linux-4.9.28.orig/kernel/locking/rtmutex.c linux-4.9.28/kernel/locking/rtmutex.c +--- linux-4.9.28.orig/kernel/locking/rtmutex.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/locking/rtmutex.c 2017-05-19 
03:37:25.186176955 +0200 +@@ -7,6 +7,11 @@ + * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> + * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt + * Copyright (C) 2006 Esben Nielsen ++ * Adaptive Spinlocks: ++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, ++ * and Peter Morreale, ++ * Adaptive Spinlocks simplification: ++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com> + * + * See Documentation/locking/rt-mutex-design.txt for details. + */ +@@ -16,6 +21,7 @@ + #include <linux/sched/rt.h> + #include <linux/sched/deadline.h> + #include <linux/timer.h> ++#include <linux/ww_mutex.h> + + #include "rtmutex_common.h" + +@@ -133,6 +139,12 @@ + WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); + } + ++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) ++{ ++ return waiter && waiter != PI_WAKEUP_INPROGRESS && ++ waiter != PI_REQUEUE_INPROGRESS; ++} ++ + /* + * We can speed up the acquire/release, if there's no debugging state to be + * set up. +@@ -222,12 +234,25 @@ + } + #endif + ++#define STEAL_NORMAL 0 ++#define STEAL_LATERAL 1 ++/* ++ * Only use with rt_mutex_waiter_{less,equal}() ++ */ ++#define task_to_waiter(p) \ ++ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } ++ + static inline int + rt_mutex_waiter_less(struct rt_mutex_waiter *left, +- struct rt_mutex_waiter *right) ++ struct rt_mutex_waiter *right, int mode) + { +- if (left->prio < right->prio) +- return 1; ++ if (mode == STEAL_NORMAL) { ++ if (left->prio < right->prio) ++ return 1; ++ } else { ++ if (left->prio <= right->prio) ++ return 1; ++ } + + /* + * If both waiters have dl_prio(), we check the deadlines of the +@@ -236,12 +261,30 @@ + * then right waiter has a dl_prio() too. + */ + if (dl_prio(left->prio)) +- return dl_time_before(left->task->dl.deadline, +- right->task->dl.deadline); ++ return dl_time_before(left->deadline, right->deadline); + + return 0; + } + ++static inline int ++rt_mutex_waiter_equal(struct rt_mutex_waiter *left, ++ struct rt_mutex_waiter *right) ++{ ++ if (left->prio != right->prio) ++ return 0; ++ ++ /* ++ * If both waiters have dl_prio(), we check the deadlines of the ++ * associated tasks. ++ * If left waiter has a dl_prio(), and we didn't return 0 above, ++ * then right waiter has a dl_prio() too. 
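rt_mutex_waiter_less() above gains a mode argument: under STEAL_LATERAL an equal priority also wins the comparison, which is what permits lateral lock stealing on the spinlock-style paths. Reduced to plain integers, with the dl_prio() deadline tie-break omitted, the comparison behaves like this standalone sketch (userspace illustration, not the kernel function):

#include <stdio.h>

#define STEAL_NORMAL  0
#define STEAL_LATERAL 1

/* Lower number == higher priority, as in the kernel view. */
static int waiter_less(int left_prio, int right_prio, int mode)
{
	if (mode == STEAL_NORMAL)
		return left_prio < right_prio;
	return left_prio <= right_prio;	/* equal priority also wins */
}

int main(void)
{
	/* Equal priority: only the lateral mode permits the steal. */
	printf("STEAL_NORMAL : %d\n", waiter_less(5, 5, STEAL_NORMAL));
	printf("STEAL_LATERAL: %d\n", waiter_less(5, 5, STEAL_LATERAL));
	return 0;
}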
++ */ ++ if (dl_prio(left->prio)) ++ return left->deadline == right->deadline; ++ ++ return 1; ++} ++ + static void + rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) + { +@@ -253,7 +296,7 @@ + while (*link) { + parent = *link; + entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry); +- if (rt_mutex_waiter_less(waiter, entry)) { ++ if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) { + link = &parent->rb_left; + } else { + link = &parent->rb_right; +@@ -292,7 +335,7 @@ + while (*link) { + parent = *link; + entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry); +- if (rt_mutex_waiter_less(waiter, entry)) { ++ if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) { + link = &parent->rb_left; + } else { + link = &parent->rb_right; +@@ -320,72 +363,16 @@ + RB_CLEAR_NODE(&waiter->pi_tree_entry); + } + +-/* +- * Calculate task priority from the waiter tree priority +- * +- * Return task->normal_prio when the waiter tree is empty or when +- * the waiter is not allowed to do priority boosting +- */ +-int rt_mutex_getprio(struct task_struct *task) +-{ +- if (likely(!task_has_pi_waiters(task))) +- return task->normal_prio; +- +- return min(task_top_pi_waiter(task)->prio, +- task->normal_prio); +-} +- +-struct task_struct *rt_mutex_get_top_task(struct task_struct *task) +-{ +- if (likely(!task_has_pi_waiters(task))) +- return NULL; +- +- return task_top_pi_waiter(task)->task; +-} +- +-/* +- * Called by sched_setscheduler() to get the priority which will be +- * effective after the change. +- */ +-int rt_mutex_get_effective_prio(struct task_struct *task, int newprio) +-{ +- if (!task_has_pi_waiters(task)) +- return newprio; +- +- if (task_top_pi_waiter(task)->task->prio <= newprio) +- return task_top_pi_waiter(task)->task->prio; +- return newprio; +-} +- +-/* +- * Adjust the priority of a task, after its pi_waiters got modified. +- * +- * This can be both boosting and unboosting. task->pi_lock must be held. +- */ +-static void __rt_mutex_adjust_prio(struct task_struct *task) ++static void rt_mutex_adjust_prio(struct task_struct *p) + { +- int prio = rt_mutex_getprio(task); ++ struct task_struct *pi_task = NULL; + +- if (task->prio != prio || dl_prio(prio)) +- rt_mutex_setprio(task, prio); +-} ++ lockdep_assert_held(&p->pi_lock); + +-/* +- * Adjust task priority (undo boosting). Called from the exit path of +- * rt_mutex_slowunlock() and rt_mutex_slowlock(). +- * +- * (Note: We do this outside of the protection of lock->wait_lock to +- * allow the lock to be taken while or before we readjust the priority +- * of task. We do not use the spin_xx_mutex() variants here as we are +- * outside of the debug path.) +- */ +-void rt_mutex_adjust_prio(struct task_struct *task) +-{ +- unsigned long flags; ++ if (task_has_pi_waiters(p)) ++ pi_task = task_top_pi_waiter(p)->task; + +- raw_spin_lock_irqsave(&task->pi_lock, flags); +- __rt_mutex_adjust_prio(task); +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ rt_mutex_setprio(p, pi_task); + } + + /* +@@ -414,6 +401,14 @@ + return debug_rt_mutex_detect_deadlock(waiter, chwalk); + } + ++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) ++{ ++ if (waiter->savestate) ++ wake_up_lock_sleeper(waiter->task); ++ else ++ wake_up_process(waiter->task); ++} ++ + /* + * Max number of times we'll walk the boosting chain: + */ +@@ -421,7 +416,8 @@ + + static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) + { +- return p->pi_blocked_on ? 
p->pi_blocked_on->lock : NULL; ++ return rt_mutex_real_waiter(p->pi_blocked_on) ? ++ p->pi_blocked_on->lock : NULL; + } + + /* +@@ -557,7 +553,7 @@ + * reached or the state of the chain has changed while we + * dropped the locks. + */ +- if (!waiter) ++ if (!rt_mutex_real_waiter(waiter)) + goto out_unlock_pi; + + /* +@@ -608,7 +604,7 @@ + * enabled we continue, but stop the requeueing in the chain + * walk. + */ +- if (waiter->prio == task->prio) { ++ if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { + if (!detect_deadlock) + goto out_unlock_pi; + else +@@ -704,7 +700,26 @@ + + /* [7] Requeue the waiter in the lock waiter tree. */ + rt_mutex_dequeue(lock, waiter); ++ ++ /* ++ * Update the waiter prio fields now that we're dequeued. ++ * ++ * These values can have changed through either: ++ * ++ * sys_sched_set_scheduler() / sys_sched_setattr() ++ * ++ * or ++ * ++ * DL CBS enforcement advancing the effective deadline. ++ * ++ * Even though pi_waiters also uses these fields, and that tree is only ++ * updated in [11], we can do this here, since we hold [L], which ++ * serializes all pi_waiters access and rb_erase() does not care about ++ * the values of the node being removed. ++ */ + waiter->prio = task->prio; ++ waiter->deadline = task->dl.deadline; ++ + rt_mutex_enqueue(lock, waiter); + + /* [8] Release the task */ +@@ -719,13 +734,16 @@ + * follow here. This is the end of the chain we are walking. + */ + if (!rt_mutex_owner(lock)) { ++ struct rt_mutex_waiter *lock_top_waiter; ++ + /* + * If the requeue [7] above changed the top waiter, + * then we need to wake the new top waiter up to try + * to get the lock. + */ +- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) +- wake_up_process(rt_mutex_top_waiter(lock)->task); ++ lock_top_waiter = rt_mutex_top_waiter(lock); ++ if (prerequeue_top_waiter != lock_top_waiter) ++ rt_mutex_wake_waiter(lock_top_waiter); + raw_spin_unlock_irq(&lock->wait_lock); + return 0; + } +@@ -745,7 +763,7 @@ + */ + rt_mutex_dequeue_pi(task, prerequeue_top_waiter); + rt_mutex_enqueue_pi(task, waiter); +- __rt_mutex_adjust_prio(task); ++ rt_mutex_adjust_prio(task); + + } else if (prerequeue_top_waiter == waiter) { + /* +@@ -761,7 +779,7 @@ + rt_mutex_dequeue_pi(task, waiter); + waiter = rt_mutex_top_waiter(lock); + rt_mutex_enqueue_pi(task, waiter); +- __rt_mutex_adjust_prio(task); ++ rt_mutex_adjust_prio(task); + } else { + /* + * Nothing changed. No need to do any priority +@@ -818,6 +836,7 @@ + return ret; + } + ++ + /* + * Try to take an rt-mutex + * +@@ -828,9 +847,12 @@ + * @waiter: The waiter that is queued to the lock's wait tree if the + * callsite called task_blocked_on_lock(), otherwise NULL + */ +-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +- struct rt_mutex_waiter *waiter) ++static int __try_to_take_rt_mutex(struct rt_mutex *lock, ++ struct task_struct *task, ++ struct rt_mutex_waiter *waiter, int mode) + { ++ lockdep_assert_held(&lock->wait_lock); ++ + /* + * Before testing whether we can acquire @lock, we set the + * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all +@@ -866,8 +888,10 @@ + * If waiter is not the highest priority waiter of + * @lock, give up. + */ +- if (waiter != rt_mutex_top_waiter(lock)) ++ if (waiter != rt_mutex_top_waiter(lock)) { ++ /* XXX rt_mutex_waiter_less() ? */ + return 0; ++ } + + /* + * We can acquire the lock. Remove the waiter from the +@@ -885,14 +909,26 @@ + * not need to be dequeued. 
+ */ + if (rt_mutex_has_waiters(lock)) { ++ struct task_struct *pown = rt_mutex_top_waiter(lock)->task; ++ ++ if (task != pown) ++ return 0; ++ ++ /* ++ * Note that RT tasks are excluded from lateral-steals ++ * to prevent the introduction of an unbounded latency. ++ */ ++ if (rt_task(task)) ++ mode = STEAL_NORMAL; + /* + * If @task->prio is greater than or equal to + * the top waiter priority (kernel view), + * @task lost. + */ +- if (task->prio >= rt_mutex_top_waiter(lock)->prio) ++ if (!rt_mutex_waiter_less(task_to_waiter(task), ++ rt_mutex_top_waiter(lock), ++ mode)) + return 0; +- + /* + * The current top waiter stays enqueued. We + * don't have to change anything in the lock +@@ -936,180 +972,592 @@ + */ + rt_mutex_set_owner(lock, task); + +- rt_mutex_deadlock_account_lock(lock, task); +- + return 1; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL + /* +- * Task blocks on lock. +- * +- * Prepare waiter and propagate pi chain +- * +- * This must be called with lock->wait_lock held and interrupts disabled ++ * preemptible spin_lock functions: + */ +-static int task_blocks_on_rt_mutex(struct rt_mutex *lock, +- struct rt_mutex_waiter *waiter, +- struct task_struct *task, +- enum rtmutex_chainwalk chwalk) ++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock, ++ bool mg_off), ++ bool do_mig_dis) + { +- struct task_struct *owner = rt_mutex_owner(lock); +- struct rt_mutex_waiter *top_waiter = waiter; +- struct rt_mutex *next_lock; +- int chain_walk = 0, res; ++ might_sleep_no_state_check(); + +- /* +- * Early deadlock detection. We really don't want the task to +- * enqueue on itself just to untangle the mess later. It's not +- * only an optimization. We drop the locks, so another waiter +- * can come in before the chain walk detects the deadlock. So +- * the other will detect the deadlock and return -EDEADLOCK, +- * which is wrong, as the other waiter is not in a deadlock +- * situation. +- */ +- if (owner == task) +- return -EDEADLK; ++ if (do_mig_dis) ++ migrate_disable(); + +- raw_spin_lock(&task->pi_lock); +- __rt_mutex_adjust_prio(task); +- waiter->task = task; +- waiter->lock = lock; +- waiter->prio = task->prio; ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) ++ return; ++ else ++ slowfn(lock, do_mig_dis); ++} + +- /* Get the top priority waiter on the lock */ +- if (rt_mutex_has_waiters(lock)) +- top_waiter = rt_mutex_top_waiter(lock); +- rt_mutex_enqueue(lock, waiter); ++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock)) ++{ ++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) ++ return; ++ else ++ slowfn(lock); ++} ++#ifdef CONFIG_SMP ++/* ++ * Note that owner is a speculative pointer and dereferencing relies ++ * on rcu_read_lock() and the check against the lock owner. ++ */ ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *owner) ++{ ++ int res = 0; + +- task->pi_blocked_on = waiter; ++ rcu_read_lock(); ++ for (;;) { ++ if (owner != rt_mutex_owner(lock)) ++ break; ++ /* ++ * Ensure that owner->on_cpu is dereferenced _after_ ++ * checking the above to be valid. 
++ */ ++ barrier(); ++ if (!owner->on_cpu) { ++ res = 1; ++ break; ++ } ++ cpu_relax(); ++ } ++ rcu_read_unlock(); ++ return res; ++} ++#else ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *orig_owner) ++{ ++ return 1; ++} ++#endif + +- raw_spin_unlock(&task->pi_lock); ++static int task_blocks_on_rt_mutex(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter, ++ struct task_struct *task, ++ enum rtmutex_chainwalk chwalk); ++/* ++ * Slow path lock function spin_lock style: this variant is very ++ * careful not to miss any non-lock wakeups. ++ * ++ * We store the current state under p->pi_lock in p->saved_state and ++ * the try_to_wake_up() code handles this accordingly. ++ */ ++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock, ++ bool mg_off) ++{ ++ struct task_struct *lock_owner, *self = current; ++ struct rt_mutex_waiter waiter, *top_waiter; ++ unsigned long flags; ++ int ret; + +- if (!owner) +- return 0; ++ rt_mutex_init_waiter(&waiter, true); + +- raw_spin_lock(&owner->pi_lock); +- if (waiter == rt_mutex_top_waiter(lock)) { +- rt_mutex_dequeue_pi(owner, top_waiter); +- rt_mutex_enqueue_pi(owner, waiter); ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); + +- __rt_mutex_adjust_prio(owner); +- if (owner->pi_blocked_on) +- chain_walk = 1; +- } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { +- chain_walk = 1; ++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ return; + } + +- /* Store the lock on which owner is blocked or NULL */ +- next_lock = task_blocked_on_lock(owner); ++ BUG_ON(rt_mutex_owner(lock) == self); + +- raw_spin_unlock(&owner->pi_lock); + /* +- * Even if full deadlock detection is on, if the owner is not +- * blocked itself, we can avoid finding this out in the chain +- * walk. ++ * We save whatever state the task is in and we'll restore it ++ * after acquiring the lock taking real wakeups into account ++ * as well. We are serialized via pi_lock against wakeups. See ++ * try_to_wake_up(). + */ +- if (!chain_walk || !next_lock) +- return 0; ++ raw_spin_lock(&self->pi_lock); ++ self->saved_state = self->state; ++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); ++ raw_spin_unlock(&self->pi_lock); + +- /* +- * The owner can't disappear while holding a lock, +- * so the owner struct is protected by wait_lock. +- * Gets dropped in rt_mutex_adjust_prio_chain()! +- */ +- get_task_struct(owner); ++ ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK); ++ BUG_ON(ret); + +- raw_spin_unlock_irq(&lock->wait_lock); ++ for (;;) { ++ /* Try to acquire the lock again. */ ++ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) ++ break; + +- res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, +- next_lock, waiter, task); ++ top_waiter = rt_mutex_top_waiter(lock); ++ lock_owner = rt_mutex_owner(lock); + +- raw_spin_lock_irq(&lock->wait_lock); ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + +- return res; +-} ++ debug_rt_mutex_print_deadlock(&waiter); + +-/* +- * Remove the top waiter from the current tasks pi waiter tree and +- * queue it up. +- * +- * Called with lock->wait_lock held and interrupts disabled. 
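adaptive_wait() above keeps a contender spinning only while the lock owner is executing on a CPU; once the owner is preempted, spinning cannot make progress and the waiter schedules out. A userspace rendering of that policy, where atomics stand in for rt_mutex_owner() and owner->on_cpu; all names are illustrative:

#include <sched.h>
#include <stdatomic.h>

/*
 * Illustrative userspace sketch, not kernel code.  "holder" stands in
 * for rt_mutex_owner(lock), "owner_running" for owner->on_cpu.
 */
struct adaptive_lock_model {
	_Atomic long holder;		/* 0 == unowned */
	_Atomic int owner_running;
};

/* 0: owner changed, retry the trylock; 1: owner preempted, go sleep. */
static int adaptive_wait_model(struct adaptive_lock_model *l, long owner)
{
	for (;;) {
		if (atomic_load(&l->holder) != owner)
			return 0;
		if (!atomic_load(&l->owner_running))
			return 1;
		sched_yield();		/* stand-in for cpu_relax() */
	}
}

int main(void)
{
	struct adaptive_lock_model l = { 42, 0 };

	/* Owner 42 holds the lock but is off-CPU: the waiter must block. */
	return adaptive_wait_model(&l, 42) == 1 ? 0 : 1;
}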
+- */ +-static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, +- struct rt_mutex *lock) +-{ +- struct rt_mutex_waiter *waiter; ++ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) { ++ if (mg_off) ++ migrate_enable(); ++ schedule(); ++ if (mg_off) ++ migrate_disable(); ++ } + +- raw_spin_lock(¤t->pi_lock); ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); + +- waiter = rt_mutex_top_waiter(lock); ++ raw_spin_lock(&self->pi_lock); ++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); ++ raw_spin_unlock(&self->pi_lock); ++ } + + /* +- * Remove it from current->pi_waiters. We do not adjust a +- * possible priority boost right now. We execute wakeup in the +- * boosted mode and go back to normal after releasing +- * lock->wait_lock. +- */ +- rt_mutex_dequeue_pi(current, waiter); ++ * Restore the task state to current->saved_state. We set it ++ * to the original state above and the try_to_wake_up() code ++ * has possibly updated it when a real (non-rtmutex) wakeup ++ * happened while we were blocked. Clear saved_state so ++ * try_to_wakeup() does not get confused. ++ */ ++ raw_spin_lock(&self->pi_lock); ++ __set_current_state_no_track(self->saved_state); ++ self->saved_state = TASK_RUNNING; ++ raw_spin_unlock(&self->pi_lock); + + /* +- * As we are waking up the top waiter, and the waiter stays +- * queued on the lock until it gets the lock, this lock +- * obviously has waiters. Just set the bit here and this has +- * the added benefit of forcing all new tasks into the +- * slow path making sure no task of lower priority than +- * the top waiter can steal this lock. ++ * try_to_take_rt_mutex() sets the waiter bit ++ * unconditionally. We might have to fix that up: + */ +- lock->owner = (void *) RT_MUTEX_HAS_WAITERS; ++ fixup_rt_mutex_waiters(lock); + +- raw_spin_unlock(¤t->pi_lock); ++ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); ++ BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry)); ++ ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + +- wake_q_add(wake_q, waiter->task); ++ debug_rt_mutex_free_waiter(&waiter); + } + ++static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock, ++ struct wake_q_head *wake_q, ++ struct wake_q_head *wq_sleeper); + /* +- * Remove a waiter from a lock and give up +- * +- * Must be called with lock->wait_lock held and interrupts disabled. I must +- * have just failed to try_to_take_rt_mutex(). ++ * Slow path to release a rt_mutex spin_lock style + */ +-static void remove_waiter(struct rt_mutex *lock, +- struct rt_mutex_waiter *waiter) ++static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) + { +- bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); +- struct task_struct *owner = rt_mutex_owner(lock); +- struct rt_mutex *next_lock; ++ unsigned long flags; ++ WAKE_Q(wake_q); ++ WAKE_Q(wake_sleeper_q); ++ bool postunlock; + +- raw_spin_lock(¤t->pi_lock); +- rt_mutex_dequeue(lock, waiter); +- current->pi_blocked_on = NULL; +- raw_spin_unlock(¤t->pi_lock); ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); ++ postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q); ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + +- /* +- * Only update priority if the waiter was the highest priority +- * waiter of the lock and there is an owner to update. 
+- */ +- if (!owner || !is_top_waiter) +- return; ++ if (postunlock) ++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); ++} + +- raw_spin_lock(&owner->pi_lock); ++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock) ++{ ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false); ++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++} ++EXPORT_SYMBOL(rt_spin_lock__no_mg); + +- rt_mutex_dequeue_pi(owner, waiter); ++void __lockfunc rt_spin_lock(spinlock_t *lock) ++{ ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true); ++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++} ++EXPORT_SYMBOL(rt_spin_lock); + +- if (rt_mutex_has_waiters(lock)) +- rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); ++void __lockfunc __rt_spin_lock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true); ++} ++EXPORT_SYMBOL(__rt_spin_lock); + +- __rt_mutex_adjust_prio(owner); ++void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false); ++} ++EXPORT_SYMBOL(__rt_spin_lock__no_mg); + +- /* Store the lock on which owner is blocked or NULL */ +- next_lock = task_blocked_on_lock(owner); ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) ++{ ++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true); ++} ++EXPORT_SYMBOL(rt_spin_lock_nested); ++#endif + +- raw_spin_unlock(&owner->pi_lock); ++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ spin_release(&lock->dep_map, 1, _RET_IP_); ++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(rt_spin_unlock__no_mg); + +- /* +- * Don't walk the chain, if the owner task is not blocked +- * itself. 
+- */ ++void __lockfunc rt_spin_unlock(spinlock_t *lock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ spin_release(&lock->dep_map, 1, _RET_IP_); ++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); ++ migrate_enable(); ++} ++EXPORT_SYMBOL(rt_spin_unlock); ++ ++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(__rt_spin_unlock); ++ ++/* ++ * Wait for the lock to get unlocked: instead of polling for an unlock ++ * (like raw spinlocks do), we lock and unlock, to force the kernel to ++ * schedule if there's contention: ++ */ ++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ spin_unlock(lock); ++} ++EXPORT_SYMBOL(rt_spin_unlock_wait); ++ ++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock) ++{ ++ int ret; ++ ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock__no_mg); ++ ++int __lockfunc rt_spin_trylock(spinlock_t *lock) ++{ ++ int ret; ++ ++ migrate_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock); ++ ++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) ++{ ++ int ret; ++ ++ local_bh_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) { ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ } else ++ local_bh_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_bh); ++ ++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) { ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_irqsave); ++ ++int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) ++{ ++ /* Subtract 1 from counter unless that drops it to 0 (ie. 
it was 1) */ ++ if (atomic_add_unless(atomic, -1, 1)) ++ return 0; ++ rt_spin_lock(lock); ++ if (atomic_dec_and_test(atomic)) ++ return 1; ++ rt_spin_unlock(lock); ++ return 0; ++} ++EXPORT_SYMBOL(atomic_dec_and_spin_lock); ++ ++ void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); ++ lockdep_init_map(&lock->dep_map, name, key, 0); ++#endif ++} ++EXPORT_SYMBOL(__rt_spin_lock_init); ++ ++#endif /* PREEMPT_RT_FULL */ ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ static inline int __sched ++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); ++ struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); ++ ++ if (!hold_ctx) ++ return 0; ++ ++ if (unlikely(ctx == hold_ctx)) ++ return -EALREADY; ++ ++ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && ++ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { ++#ifdef CONFIG_DEBUG_MUTEXES ++ DEBUG_LOCKS_WARN_ON(ctx->contending_lock); ++ ctx->contending_lock = ww; ++#endif ++ return -EDEADLK; ++ } ++ ++ return 0; ++} ++#else ++ static inline int __sched ++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++ BUG(); ++ return 0; ++} ++ ++#endif ++ ++static inline int ++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, ++ struct rt_mutex_waiter *waiter) ++{ ++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); ++} ++ ++/* ++ * Task blocks on lock. ++ * ++ * Prepare waiter and propagate pi chain ++ * ++ * This must be called with lock->wait_lock held and interrupts disabled ++ */ ++static int task_blocks_on_rt_mutex(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter, ++ struct task_struct *task, ++ enum rtmutex_chainwalk chwalk) ++{ ++ struct task_struct *owner = rt_mutex_owner(lock); ++ struct rt_mutex_waiter *top_waiter = waiter; ++ struct rt_mutex *next_lock; ++ int chain_walk = 0, res; ++ ++ lockdep_assert_held(&lock->wait_lock); ++ ++ /* ++ * Early deadlock detection. We really don't want the task to ++ * enqueue on itself just to untangle the mess later. It's not ++ * only an optimization. We drop the locks, so another waiter ++ * can come in before the chain walk detects the deadlock. So ++ * the other will detect the deadlock and return -EDEADLOCK, ++ * which is wrong, as the other waiter is not in a deadlock ++ * situation. ++ */ ++ if (owner == task) ++ return -EDEADLK; ++ ++ raw_spin_lock(&task->pi_lock); ++ ++ /* ++ * In the case of futex requeue PI, this will be a proxy ++ * lock. The task will wake unaware that it is enqueueed on ++ * this lock. Avoid blocking on two locks and corrupting ++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS ++ * flag. futex_wait_requeue_pi() sets this when it wakes up ++ * before requeue (due to a signal or timeout). Do not enqueue ++ * the task if PI_WAKEUP_INPROGRESS is set. 
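atomic_dec_and_spin_lock() earlier in this hunk drops a reference without taking the lock unless the count could hit zero, leaning on atomic_add_unless() for the lock-free part. A C11 sketch of that decrement-unless pattern; this is a userspace stand-in, not the kernel implementation:

#include <stdatomic.h>
#include <stdio.h>

/*
 * Add 'a' to the counter unless it currently holds 'u', so the value
 * can never be moved off 'u' without the lock.  Returns 1 when the
 * lockless update happened, 0 when the caller must fall back to the
 * locked variant.  Illustrative only.
 */
static int add_unless(_Atomic int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u) {
		/* On failure, 'c' is reloaded with the current value. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return 1;
	}
	return 0;
}

int main(void)
{
	_Atomic int refs = 3;

	/* 3 -> 2 without the lock; at refs == 1 this would return 0 and
	 * the caller would take the lock for the final decrement. */
	printf("fast path: %d, refs now: %d\n",
	       add_unless(&refs, -1, 1), atomic_load(&refs));
	return 0;
}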
++ */ ++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { ++ raw_spin_unlock(&task->pi_lock); ++ return -EAGAIN; ++ } ++ ++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); ++ ++ rt_mutex_adjust_prio(task); ++ waiter->task = task; ++ waiter->lock = lock; ++ waiter->prio = task->prio; ++ waiter->deadline = task->dl.deadline; ++ ++ /* Get the top priority waiter on the lock */ ++ if (rt_mutex_has_waiters(lock)) ++ top_waiter = rt_mutex_top_waiter(lock); ++ rt_mutex_enqueue(lock, waiter); ++ ++ task->pi_blocked_on = waiter; ++ ++ raw_spin_unlock(&task->pi_lock); ++ ++ if (!owner) ++ return 0; ++ ++ raw_spin_lock(&owner->pi_lock); ++ if (waiter == rt_mutex_top_waiter(lock)) { ++ rt_mutex_dequeue_pi(owner, top_waiter); ++ rt_mutex_enqueue_pi(owner, waiter); ++ ++ rt_mutex_adjust_prio(owner); ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) ++ chain_walk = 1; ++ } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { ++ chain_walk = 1; ++ } ++ ++ /* Store the lock on which owner is blocked or NULL */ ++ next_lock = task_blocked_on_lock(owner); ++ ++ raw_spin_unlock(&owner->pi_lock); ++ /* ++ * Even if full deadlock detection is on, if the owner is not ++ * blocked itself, we can avoid finding this out in the chain ++ * walk. ++ */ ++ if (!chain_walk || !next_lock) ++ return 0; ++ ++ /* ++ * The owner can't disappear while holding a lock, ++ * so the owner struct is protected by wait_lock. ++ * Gets dropped in rt_mutex_adjust_prio_chain()! ++ */ ++ get_task_struct(owner); ++ ++ raw_spin_unlock_irq(&lock->wait_lock); ++ ++ res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, ++ next_lock, waiter, task); ++ ++ raw_spin_lock_irq(&lock->wait_lock); ++ ++ return res; ++} ++ ++/* ++ * Remove the top waiter from the current tasks pi waiter tree and ++ * queue it up. ++ * ++ * Called with lock->wait_lock held and interrupts disabled. ++ */ ++static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, ++ struct wake_q_head *wake_sleeper_q, ++ struct rt_mutex *lock) ++{ ++ struct rt_mutex_waiter *waiter; ++ ++ raw_spin_lock(¤t->pi_lock); ++ ++ waiter = rt_mutex_top_waiter(lock); ++ ++ /* ++ * Remove it from current->pi_waiters and deboost. ++ * ++ * We must in fact deboost here in order to ensure we call ++ * rt_mutex_setprio() to update p->pi_top_task before the ++ * task unblocks. ++ */ ++ rt_mutex_dequeue_pi(current, waiter); ++ rt_mutex_adjust_prio(current); ++ ++ /* ++ * As we are waking up the top waiter, and the waiter stays ++ * queued on the lock until it gets the lock, this lock ++ * obviously has waiters. Just set the bit here and this has ++ * the added benefit of forcing all new tasks into the ++ * slow path making sure no task of lower priority than ++ * the top waiter can steal this lock. ++ */ ++ lock->owner = (void *) RT_MUTEX_HAS_WAITERS; ++ ++ /* ++ * We deboosted before waking the top waiter task such that we don't ++ * run two tasks with the 'same' priority (and ensure the ++ * p->pi_top_task pointer points to a blocked task). This however can ++ * lead to priority inversion if we would get preempted after the ++ * deboost but before waking our donor task, hence the preempt_disable() ++ * before unlock. 
++ * ++ * Pairs with preempt_enable() in rt_mutex_postunlock(); ++ */ ++ preempt_disable(); ++ if (waiter->savestate) ++ wake_q_add(wake_sleeper_q, waiter->task); ++ else ++ wake_q_add(wake_q, waiter->task); ++ raw_spin_unlock(¤t->pi_lock); ++} ++ ++/* ++ * Remove a waiter from a lock and give up ++ * ++ * Must be called with lock->wait_lock held and interrupts disabled. I must ++ * have just failed to try_to_take_rt_mutex(). ++ */ ++static void remove_waiter(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter) ++{ ++ bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); ++ struct task_struct *owner = rt_mutex_owner(lock); ++ struct rt_mutex *next_lock = NULL; ++ ++ lockdep_assert_held(&lock->wait_lock); ++ ++ raw_spin_lock(¤t->pi_lock); ++ rt_mutex_dequeue(lock, waiter); ++ current->pi_blocked_on = NULL; ++ raw_spin_unlock(¤t->pi_lock); ++ ++ /* ++ * Only update priority if the waiter was the highest priority ++ * waiter of the lock and there is an owner to update. ++ */ ++ if (!owner || !is_top_waiter) ++ return; ++ ++ raw_spin_lock(&owner->pi_lock); ++ ++ rt_mutex_dequeue_pi(owner, waiter); ++ ++ if (rt_mutex_has_waiters(lock)) ++ rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); ++ ++ rt_mutex_adjust_prio(owner); ++ ++ /* Store the lock on which owner is blocked or NULL */ ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) ++ next_lock = task_blocked_on_lock(owner); ++ ++ raw_spin_unlock(&owner->pi_lock); ++ ++ /* ++ * Don't walk the chain, if the owner task is not blocked ++ * itself. ++ */ + if (!next_lock) + return; + +@@ -1138,21 +1586,30 @@ + raw_spin_lock_irqsave(&task->pi_lock, flags); + + waiter = task->pi_blocked_on; +- if (!waiter || (waiter->prio == task->prio && +- !dl_prio(task->prio))) { ++ if (!rt_mutex_real_waiter(waiter) || ++ rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + return; + } + next_lock = waiter->lock; +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); + + /* gets dropped in rt_mutex_adjust_prio_chain()! */ + get_task_struct(task); + ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); + rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, + next_lock, NULL, task); + } + ++void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) ++{ ++ debug_rt_mutex_init_waiter(waiter); ++ RB_CLEAR_NODE(&waiter->pi_tree_entry); ++ RB_CLEAR_NODE(&waiter->tree_entry); ++ waiter->task = NULL; ++ waiter->savestate = savestate; ++} ++ + /** + * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop + * @lock: the rt_mutex to take +@@ -1166,7 +1623,8 @@ + static int __sched + __rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- struct rt_mutex_waiter *waiter) ++ struct rt_mutex_waiter *waiter, ++ struct ww_acquire_ctx *ww_ctx) + { + int ret = 0; + +@@ -1175,16 +1633,17 @@ + if (try_to_take_rt_mutex(lock, current, waiter)) + break; + +- /* +- * TASK_INTERRUPTIBLE checks for signals and +- * timeout. Ignored otherwise. +- */ +- if (unlikely(state == TASK_INTERRUPTIBLE)) { +- /* Signal pending? 
*/
+- if (signal_pending(current))
+- ret = -EINTR;
+- if (timeout && !timeout->task)
+- ret = -ETIMEDOUT;
++ if (timeout && !timeout->task) {
++ ret = -ETIMEDOUT;
++ break;
++ }
++ if (signal_pending_state(state, current)) {
++ ret = -EINTR;
++ break;
++ }
++
++ if (ww_ctx && ww_ctx->acquired > 0) {
++ ret = __mutex_lock_check_stamp(lock, ww_ctx);
+ if (ret)
+ break;
+ }
+@@ -1199,59 +1658,118 @@
+ set_current_state(state);
+ }
+
+- __set_current_state(TASK_RUNNING);
+- return ret;
++ __set_current_state(TASK_RUNNING);
++ return ret;
++}
++
++static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
++ struct rt_mutex_waiter *w)
++{
++ /*
++ * If the result is not -EDEADLOCK or the caller requested
++ * deadlock detection, nothing to do here.
++ */
++ if (res != -EDEADLOCK || detect_deadlock)
++ return;
++
++ /*
++ * Yell loudly and stop the task right here.
++ */
++ rt_mutex_print_deadlock(w);
++ while (1) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ }
++}
++
++static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
++ struct ww_acquire_ctx *ww_ctx)
++{
++#ifdef CONFIG_DEBUG_MUTEXES
++ /*
++ * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
++ * but released with a normal mutex_unlock in this call.
++ *
++ * This should never happen, always use ww_mutex_unlock.
++ */
++ DEBUG_LOCKS_WARN_ON(ww->ctx);
++
++ /*
++ * Not quite done after calling ww_acquire_done() ?
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
++
++ if (ww_ctx->contending_lock) {
++ /*
++ * After -EDEADLK you tried to
++ * acquire a different ww_mutex? Bad!
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
++
++ /*
++ * You called ww_mutex_lock after receiving -EDEADLK,
++ * but 'forgot' to unlock everything else first?
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
++ ww_ctx->contending_lock = NULL;
++ }
++
++ /*
++ * Naughty, using a different class will lead to undefined behavior!
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
++#endif
++ ww_ctx->acquired++;
+ }
+
+-static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+- struct rt_mutex_waiter *w)
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void ww_mutex_account_lock(struct rt_mutex *lock,
++ struct ww_acquire_ctx *ww_ctx)
+ {
++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
++ struct rt_mutex_waiter *waiter, *n;
++
+ /*
+- * If the result is not -EDEADLOCK or the caller requested
+- * deadlock detection, nothing to do here.
++ * This branch gets optimized out for the common case,
++ * and is only important for ww_mutex_lock.
+ */
+- if (res != -EDEADLOCK || detect_deadlock)
+- return;
++ ww_mutex_lock_acquired(ww, ww_ctx);
++ ww->ctx = ww_ctx;
+
+ /*
+- * Yell lowdly and stop the task right here.
++ * Give any possible sleeping processes the chance to wake up,
++ * so they can recheck if they have to back off.
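__mutex_lock_check_stamp() earlier in this hunk implements the ww_mutex wait/wound rule for the RT build: an acquire context younger than the one holding the lock gets -EDEADLK and must back off. A standalone sketch of the stamp test; the pointer comparison that breaks exact stamp ties in the real code is left out:

#include <errno.h>
#include <limits.h>
#include <stdio.h>

/*
 * Illustrative userspace sketch: the context with the older
 * (numerically smaller) stamp wins.  The wrap-safe "mine is younger
 * than the holder's" test mirrors the comparison in the patch.
 */
static int check_stamp(unsigned long mine, unsigned long holders)
{
	if (mine - holders <= LONG_MAX && mine != holders)
		return -EDEADLK;	/* younger contender backs off */
	return 0;			/* older contender may keep waiting */
}

int main(void)
{
	printf("older vs holder : %d\n", check_stamp(1, 2)); /* 0 */
	printf("younger vs holder: %d\n", check_stamp(2, 1)); /* -EDEADLK */
	return 0;
}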
+ */ +- rt_mutex_print_deadlock(w); +- while (1) { +- set_current_state(TASK_INTERRUPTIBLE); +- schedule(); ++ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters, ++ tree_entry) { ++ /* XXX debug rt mutex waiter wakeup */ ++ ++ BUG_ON(waiter->lock != lock); ++ rt_mutex_wake_waiter(waiter); + } + } + +-/* +- * Slow path lock function: +- */ +-static int __sched +-rt_mutex_slowlock(struct rt_mutex *lock, int state, +- struct hrtimer_sleeper *timeout, +- enum rtmutex_chainwalk chwalk) +-{ +- struct rt_mutex_waiter waiter; +- unsigned long flags; +- int ret = 0; ++#else + +- debug_rt_mutex_init_waiter(&waiter); +- RB_CLEAR_NODE(&waiter.pi_tree_entry); +- RB_CLEAR_NODE(&waiter.tree_entry); ++static void ww_mutex_account_lock(struct rt_mutex *lock, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++ BUG(); ++} ++#endif + +- /* +- * Technically we could use raw_spin_[un]lock_irq() here, but this can +- * be called in early boot if the cmpxchg() fast path is disabled +- * (debug, no architecture support). In this case we will acquire the +- * rtmutex with lock->wait_lock held. But we cannot unconditionally +- * enable interrupts in that early boot case. So we need to use the +- * irqsave/restore variants. +- */ +- raw_spin_lock_irqsave(&lock->wait_lock, flags); ++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, ++ struct hrtimer_sleeper *timeout, ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx, ++ struct rt_mutex_waiter *waiter) ++{ ++ int ret; + + /* Try to acquire the lock again: */ + if (try_to_take_rt_mutex(lock, current, NULL)) { +- raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ if (ww_ctx) ++ ww_mutex_account_lock(lock, ww_ctx); + return 0; + } + +@@ -1261,17 +1779,27 @@ + if (unlikely(timeout)) + hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); + +- ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); ++ ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk); + +- if (likely(!ret)) ++ if (likely(!ret)) { + /* sleep on the mutex */ +- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); ++ ret = __rt_mutex_slowlock(lock, state, timeout, waiter, ++ ww_ctx); ++ } else if (ww_ctx) { ++ /* ww_mutex received EDEADLK, let it become EALREADY */ ++ ret = __mutex_lock_check_stamp(lock, ww_ctx); ++ BUG_ON(!ret); ++ } + + if (unlikely(ret)) { + __set_current_state(TASK_RUNNING); + if (rt_mutex_has_waiters(lock)) +- remove_waiter(lock, &waiter); +- rt_mutex_handle_deadlock(ret, chwalk, &waiter); ++ remove_waiter(lock, waiter); ++ /* ww_mutex want to report EDEADLK/EALREADY, let them */ ++ if (!ww_ctx) ++ rt_mutex_handle_deadlock(ret, chwalk, waiter); ++ } else if (ww_ctx) { ++ ww_mutex_account_lock(lock, ww_ctx); + } + + /* +@@ -1279,6 +1807,36 @@ + * unconditionally. We might have to fix that up. + */ + fixup_rt_mutex_waiters(lock); ++ return ret; ++} ++ ++/* ++ * Slow path lock function: ++ */ ++static int __sched ++rt_mutex_slowlock(struct rt_mutex *lock, int state, ++ struct hrtimer_sleeper *timeout, ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++ struct rt_mutex_waiter waiter; ++ unsigned long flags; ++ int ret = 0; ++ ++ rt_mutex_init_waiter(&waiter, false); ++ ++ /* ++ * Technically we could use raw_spin_[un]lock_irq() here, but this can ++ * be called in early boot if the cmpxchg() fast path is disabled ++ * (debug, no architecture support). In this case we will acquire the ++ * rtmutex with lock->wait_lock held. But we cannot unconditionally ++ * enable interrupts in that early boot case. 
So we need to use the ++ * irqsave/restore variants. ++ */ ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); ++ ++ ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx, ++ &waiter); + + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + +@@ -1328,10 +1886,12 @@ + + /* + * Slow path to release a rt-mutex. +- * Return whether the current task needs to undo a potential priority boosting. ++ * ++ * Return whether the current task needs to call rt_mutex_postunlock(). + */ + static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, +- struct wake_q_head *wake_q) ++ struct wake_q_head *wake_q, ++ struct wake_q_head *wake_sleeper_q) + { + unsigned long flags; + +@@ -1340,8 +1900,6 @@ + + debug_rt_mutex_unlock(lock); + +- rt_mutex_deadlock_account_unlock(current); +- + /* + * We must be careful here if the fast path is enabled. If we + * have no waiters queued we cannot set owner to NULL here +@@ -1387,12 +1945,10 @@ + * + * Queue the next waiter for wakeup once we release the wait_lock. + */ +- mark_wakeup_next_waiter(wake_q, lock); +- ++ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock); + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + +- /* check PI boosting */ +- return true; ++ return true; /* call rt_mutex_postunlock() */ + } + + /* +@@ -1403,63 +1959,85 @@ + */ + static inline int + rt_mutex_fastlock(struct rt_mutex *lock, int state, ++ struct ww_acquire_ctx *ww_ctx, + int (*slowfn)(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- enum rtmutex_chainwalk chwalk)) ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx)) + { +- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { +- rt_mutex_deadlock_account_lock(lock, current); ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 0; +- } else +- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); ++ ++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx); + } + + static inline int + rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx, + int (*slowfn)(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- enum rtmutex_chainwalk chwalk)) ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx)) + { + if (chwalk == RT_MUTEX_MIN_CHAINWALK && +- likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { +- rt_mutex_deadlock_account_lock(lock, current); ++ likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 0; +- } else +- return slowfn(lock, state, timeout, chwalk); ++ ++ return slowfn(lock, state, timeout, chwalk, ww_ctx); + } + + static inline int + rt_mutex_fasttrylock(struct rt_mutex *lock, + int (*slowfn)(struct rt_mutex *lock)) + { +- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { +- rt_mutex_deadlock_account_lock(lock, current); ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 1; +- } ++ + return slowfn(lock); + } + ++/* ++ * Performs the wakeup of the the top-waiter and re-enables preemption. 
++ */ ++void rt_mutex_postunlock(struct wake_q_head *wake_q, ++ struct wake_q_head *wq_sleeper) ++{ ++ wake_up_q(wake_q); ++ wake_up_q_sleeper(wq_sleeper); ++ ++ /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ ++ preempt_enable(); ++} ++ + static inline void + rt_mutex_fastunlock(struct rt_mutex *lock, + bool (*slowfn)(struct rt_mutex *lock, +- struct wake_q_head *wqh)) ++ struct wake_q_head *wqh, ++ struct wake_q_head *wq_sleeper)) + { + WAKE_Q(wake_q); ++ WAKE_Q(wake_sleeper_q); + +- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { +- rt_mutex_deadlock_account_unlock(current); ++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) ++ return; + +- } else { +- bool deboost = slowfn(lock, &wake_q); ++ if (slowfn(lock, &wake_q, &wake_sleeper_q)) ++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); ++} + +- wake_up_q(&wake_q); ++/** ++ * rt_mutex_lock_state - lock a rt_mutex with a given state ++ * ++ * @lock: The rt_mutex to be locked ++ * @state: The state to set when blocking on the rt_mutex ++ */ ++int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state) ++{ ++ might_sleep(); + +- /* Undo pi boosting if necessary: */ +- if (deboost) +- rt_mutex_adjust_prio(current); +- } ++ return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock); + } + + /** +@@ -1469,15 +2047,13 @@ + */ + void __sched rt_mutex_lock(struct rt_mutex *lock) + { +- might_sleep(); +- +- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); ++ rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE); + } + EXPORT_SYMBOL_GPL(rt_mutex_lock); + + /** + * rt_mutex_lock_interruptible - lock a rt_mutex interruptible +- * ++ ** + * @lock: the rt_mutex to be locked + * + * Returns: +@@ -1486,23 +2062,32 @@ + */ + int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) + { +- might_sleep(); +- +- return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); ++ return rt_mutex_lock_state(lock, TASK_INTERRUPTIBLE); + } + EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); + +-/* +- * Futex variant with full deadlock detection. ++/** ++ * rt_mutex_lock_killable - lock a rt_mutex killable ++ * ++ * @lock: the rt_mutex to be locked ++ * @detect_deadlock: deadlock detection on/off ++ * ++ * Returns: ++ * 0 on success ++ * -EINTR when interrupted by a signal + */ +-int rt_mutex_timed_futex_lock(struct rt_mutex *lock, +- struct hrtimer_sleeper *timeout) ++int __sched rt_mutex_lock_killable(struct rt_mutex *lock) + { +- might_sleep(); ++ return rt_mutex_lock_state(lock, TASK_KILLABLE); ++} ++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); + +- return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, +- RT_MUTEX_FULL_CHAINWALK, +- rt_mutex_slowlock); ++/* ++ * Futex variant, must not use fastpath. 
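rt_mutex_postunlock() above delivers wakeups that were only queued while wait_lock was held, after that lock has been dropped; the woken task therefore never bounces straight off a still-held wait_lock, and the preempt_enable() pairs with the disable done at queueing time. A userspace sketch of the queue-then-wake pattern, with a fixed array standing in for struct wake_q_head; all names are illustrative:

#include <pthread.h>
#include <stdio.h>

#define WAKE_Q_MAX 8

/* Illustrative stand-in for struct wake_q_head: remember who to wake
 * now, deliver the wakeups later. */
struct wake_q_model {
	pthread_cond_t *queued[WAKE_Q_MAX];
	int n;
};

/* Called while the "wait_lock" is held: only records the wakeup. */
static void wake_q_add_model(struct wake_q_model *q, pthread_cond_t *c)
{
	if (q->n < WAKE_Q_MAX)
		q->queued[q->n++] = c;
}

/* Called after the lock is dropped: performs the deferred wakeups. */
static void wake_up_q_model(struct wake_q_model *q)
{
	while (q->n > 0)
		pthread_cond_signal(q->queued[--q->n]);
}

int main(void)
{
	pthread_cond_t waiter = PTHREAD_COND_INITIALIZER;
	struct wake_q_model q = { {0}, 0 };

	wake_q_add_model(&q, &waiter);	/* under the lock: defer */
	/* ... the lock would be released here ... */
	wake_up_q_model(&q);		/* after unlock: deliver */
	printf("queued wakeups delivered\n");
	return 0;
}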
++ */ ++int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) ++{ ++ return rt_mutex_slowtrylock(lock); + } + + /** +@@ -1525,6 +2110,7 @@ + + return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, + RT_MUTEX_MIN_CHAINWALK, ++ NULL, + rt_mutex_slowlock); + } + EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); +@@ -1542,7 +2128,11 @@ + */ + int __sched rt_mutex_trylock(struct rt_mutex *lock) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (WARN_ON_ONCE(in_irq() || in_nmi())) ++#else + if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) ++#endif + return 0; + + return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); +@@ -1560,21 +2150,53 @@ + } + EXPORT_SYMBOL_GPL(rt_mutex_unlock); + ++static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock, ++ struct wake_q_head *wake_q, ++ struct wake_q_head *wq_sleeper) ++{ ++ lockdep_assert_held(&lock->wait_lock); ++ ++ debug_rt_mutex_unlock(lock); ++ ++ if (!rt_mutex_has_waiters(lock)) { ++ lock->owner = NULL; ++ return false; /* done */ ++ } ++ ++ /* ++ * We've already deboosted, mark_wakeup_next_waiter() will ++ * retain preempt_disabled when we drop the wait_lock, to ++ * avoid inversion prior to the wakeup. preempt_disable() ++ * therein pairs with rt_mutex_postunlock(). ++ */ ++ mark_wakeup_next_waiter(wake_q, wq_sleeper, lock); ++ ++ return true; /* call postunlock() */ ++} ++ + /** +- * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock +- * @lock: the rt_mutex to be unlocked +- * +- * Returns: true/false indicating whether priority adjustment is +- * required or not. ++ * Futex variant, that since futex variants do not use the fast-path, can be ++ * simple and will not need to retry. + */ +-bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, +- struct wake_q_head *wqh) ++bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, ++ struct wake_q_head *wake_q, ++ struct wake_q_head *wq_sleeper) + { +- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { +- rt_mutex_deadlock_account_unlock(current); +- return false; +- } +- return rt_mutex_slowunlock(lock, wqh); ++ return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper); ++} ++ ++void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) ++{ ++ WAKE_Q(wake_q); ++ WAKE_Q(wake_sleeper_q); ++ bool postunlock; ++ ++ raw_spin_lock_irq(&lock->wait_lock); ++ postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q); ++ raw_spin_unlock_irq(&lock->wait_lock); ++ ++ if (postunlock) ++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q); + } + + /** +@@ -1607,13 +2229,12 @@ + void __rt_mutex_init(struct rt_mutex *lock, const char *name) + { + lock->owner = NULL; +- raw_spin_lock_init(&lock->wait_lock); + lock->waiters = RB_ROOT; + lock->waiters_leftmost = NULL; + + debug_rt_mutex_init(lock, name); + } +-EXPORT_SYMBOL_GPL(__rt_mutex_init); ++EXPORT_SYMBOL(__rt_mutex_init); + + /** + * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a +@@ -1628,10 +2249,9 @@ + void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner) + { +- __rt_mutex_init(lock, NULL); ++ rt_mutex_init(lock); + debug_rt_mutex_proxy_lock(lock, proxy_owner); + rt_mutex_set_owner(lock, proxy_owner); +- rt_mutex_deadlock_account_lock(lock, proxy_owner); + } + + /** +@@ -1647,34 +2267,45 @@ + { + debug_rt_mutex_proxy_unlock(lock); + rt_mutex_set_owner(lock, NULL); +- rt_mutex_deadlock_account_unlock(proxy_owner); + } + +-/** +- * rt_mutex_start_proxy_lock() - Start lock acquisition for another task +- * @lock: the rt_mutex to take +- * 
+- * @task: the task to prepare
+- *
+- * Returns:
+- * 0 - task blocked on lock
+- * 1 - acquired the lock for task, caller should wake it up
+- * <0 - error
+- *
+- * Special API call for FUTEX_REQUEUE_PI support.
+- */
+-int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
++int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ struct task_struct *task)
+ {
+ int ret;
+
+- raw_spin_lock_irq(&lock->wait_lock);
++ if (try_to_take_rt_mutex(lock, task, NULL))
++ return 1;
+
+- if (try_to_take_rt_mutex(lock, task, NULL)) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * In PREEMPT_RT there's an added race.
++ * If the task that we are about to requeue times out,
++ * it can set PI_WAKEUP_INPROGRESS. This tells the requeue
++ * to skip this task. But right after the task sets
++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
++ * This will replace the PI_WAKEUP_INPROGRESS with the actual
++ * lock that it blocks on. We *must not* place this task
++ * on this proxy lock in that case.
++ *
++ * To prevent this race, we first take the task's pi_lock
++ * and check if it has updated its pi_blocked_on. If it has,
++ * we assume that it woke up and we return -EAGAIN.
++ * Otherwise, we set the task's pi_blocked_on to
++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
++ * it will know that we are in the process of requeuing it.
++ */
++ raw_spin_lock(&task->pi_lock);
++ if (task->pi_blocked_on) {
++ raw_spin_unlock(&task->pi_lock);
+ raw_spin_unlock_irq(&lock->wait_lock);
+- return 1;
++ return -EAGAIN;
+ }
++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
++ raw_spin_unlock(&task->pi_lock);
++#endif
+
+ /* We enforce deadlock detection for futexes */
+ ret = task_blocks_on_rt_mutex(lock, waiter, task,
+@@ -1690,17 +2321,41 @@
+ ret = 0;
+ }
+
+- if (unlikely(ret))
++ if (ret && rt_mutex_has_waiters(lock))
+ remove_waiter(lock, waiter);
+
+- raw_spin_unlock_irq(&lock->wait_lock);
+-
+ debug_rt_mutex_print_deadlock(waiter);
+
+ return ret;
+ }
+
+ /**
++ * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
++ * @lock: the rt_mutex to take
++ * @waiter: the pre-initialized rt_mutex_waiter
++ * @task: the task to prepare
++ *
++ * Returns:
++ * 0 - task blocked on lock
++ * 1 - acquired the lock for task, caller should wake it up
++ * <0 - error
++ *
++ * Special API call for FUTEX_REQUEUE_PI support.
++ */
++int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ struct task_struct *task)
++{
++ int ret;
++
++ raw_spin_lock_irq(&lock->wait_lock);
++ ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
++ raw_spin_unlock_irq(&lock->wait_lock);
++
++ return ret;
++}
++
++/**
+ * rt_mutex_next_owner - return the next owner of the lock
+ *
+ * @lock: the rt lock query
+@@ -1721,24 +2376,27 @@
+ }
+
+ /**
+- * rt_mutex_finish_proxy_lock() - Complete lock acquisition
++ * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
+ * @lock: the rt_mutex we were woken on
+ * @to: the timeout, null if none. hrtimer should already have
+ * been started.
+ * @waiter: the pre-initialized rt_mutex_waiter
+ *
+- * Complete the lock acquisition started our behalf by another thread.
++ * Wait for the lock acquisition started on our behalf by
++ * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
++ * rt_mutex_cleanup_proxy_lock().
+ *
+ * Returns:
+ * 0 - success
+ * <0 - error, one of -EINTR, -ETIMEDOUT
+ *
+- * Special API call for PI-futex requeue support
++ * Special API call for PI-futex support
+ */
+-int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
++int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *to,
+ struct rt_mutex_waiter *waiter)
+ {
++ struct task_struct *tsk = current;
+ int ret;
+
+ raw_spin_lock_irq(&lock->wait_lock);
+@@ -1746,10 +2404,65 @@
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* sleep on the mutex */
+- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
++ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
++
++ /*
++ * RT has a problem here when the wait got interrupted by a timeout
++ * or a signal. task->pi_blocked_on is still set. The task must
++ * acquire the hash bucket lock when returning from this function.
++ *
++ * If the hash bucket lock is contended then the
++ * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
++ * task_blocks_on_rt_mutex() will trigger. This can be avoided by
++ * clearing task->pi_blocked_on which removes the task from the
++ * boosting chain of the rtmutex. That's correct because the task
++ * is no longer blocked on it.
++ */
++ if (ret) {
++ raw_spin_lock(&tsk->pi_lock);
++ tsk->pi_blocked_on = NULL;
++ raw_spin_unlock(&tsk->pi_lock);
++ }
++
++ raw_spin_unlock_irq(&lock->wait_lock);
++
++ return ret;
++}
++
++/**
++ * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
++ * @lock: the rt_mutex we were woken on
++ * @waiter: the pre-initialized rt_mutex_waiter
++ *
++ * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
++ *
++ * Unless we acquired the lock, we're still enqueued on the wait-list and can
++ * in fact still be granted ownership until we're removed. Therefore we can
++ * find we are in fact the owner and must disregard the
++ * rt_mutex_wait_proxy_lock() failure.
++ *
++ * Returns:
++ * true - did the cleanup, we are done.
++ * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
++ * caller should disregard its return value.
++ *
++ * Special API call for PI-futex support
++ */
++bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter)
++{
++ bool cleanup = false;
+
+- if (unlikely(ret))
++ raw_spin_lock_irq(&lock->wait_lock);
++ /*
++ * Unless we're the owner, we're still enqueued on the wait_list.
++ * So check if we became the owner; if not, take us off the wait_list.
++ */
++ if (rt_mutex_owner(lock) != current) {
+ remove_waiter(lock, waiter);
++ fixup_rt_mutex_waiters(lock);
++ cleanup = true;
++ }
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+@@ -1759,5 +2472,91 @@
+
+ raw_spin_unlock_irq(&lock->wait_lock);
+
++ return cleanup;
++}
++
++static inline int
++ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
++ unsigned tmp;
++
++ if (ctx->deadlock_inject_countdown-- == 0) {
++ tmp = ctx->deadlock_inject_interval;
++ if (tmp > UINT_MAX/4)
++ tmp = UINT_MAX;
++ else
++ tmp = tmp*2 + tmp + tmp/2;
++
++ ctx->deadlock_inject_interval = tmp;
++ ctx->deadlock_inject_countdown = tmp;
++ ctx->contending_lock = lock;
++
++ ww_mutex_unlock(lock);
++
++ return -EDEADLK;
++ }
++#endif
++
++ return 0;
++}
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++int __sched
++__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++{
++ int ret;
++
++ might_sleep();
++
++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
++ if (ret)
++ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
++ else if (!ret && ww_ctx->acquired > 1)
++ return ww_mutex_deadlock_injection(lock, ww_ctx);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
++
++int __sched
++__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++{
++ int ret;
++
++ might_sleep();
++
++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
++ if (ret)
++ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
++ else if (!ret && ww_ctx->acquired > 1)
++ return ww_mutex_deadlock_injection(lock, ww_ctx);
++
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(__ww_mutex_lock);
++
++void __sched ww_mutex_unlock(struct ww_mutex *lock)
++{
++ int nest = !!lock->ctx;
++
++ /*
++ * The unlocking fastpath is the 0->1 transition from 'locked'
++ * into 'unlocked' state:
++ */
++ if (nest) {
++#ifdef CONFIG_DEBUG_MUTEXES
++ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
++#endif
++ if (lock->ctx->acquired > 0)
++ lock->ctx->acquired--;
++ lock->ctx = NULL;
++ }
++
++ mutex_release(&lock->base.dep_map, nest, _RET_IP_);
++ rt_mutex_unlock(&lock->base.lock);
++}
++EXPORT_SYMBOL(ww_mutex_unlock);
++#endif
+diff -Nur linux-4.9.28.orig/kernel/locking/rtmutex_common.h linux-4.9.28/kernel/locking/rtmutex_common.h
+--- linux-4.9.28.orig/kernel/locking/rtmutex_common.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/kernel/locking/rtmutex_common.h 2017-05-19 03:37:25.186176955 +0200
+@@ -27,12 +27,14 @@
+ struct rb_node pi_tree_entry;
+ struct task_struct *task;
+ struct rt_mutex *lock;
++ bool savestate;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ unsigned long ip;
+ struct pid *deadlock_task_pid;
+ struct rt_mutex *deadlock_lock;
+ #endif
+ int prio;
++ u64 deadline;
+ };
+
+ /*
+@@ -98,21 +100,45 @@
+ /*
+ * PI-futex support (proxy locking functions, etc.):
+ */
++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
++
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+ extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
++extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate);
++extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ struct task_struct *task);
+extern int
rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task); +-extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, +- struct hrtimer_sleeper *to, +- struct rt_mutex_waiter *waiter); +-extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to); +-extern bool rt_mutex_futex_unlock(struct rt_mutex *lock, +- struct wake_q_head *wqh); +-extern void rt_mutex_adjust_prio(struct task_struct *task); ++extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, ++ struct hrtimer_sleeper *to, ++ struct rt_mutex_waiter *waiter); ++extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter); ++ ++extern int rt_mutex_futex_trylock(struct rt_mutex *l); ++ ++extern void rt_mutex_futex_unlock(struct rt_mutex *lock); ++extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, ++ struct wake_q_head *wqh, ++ struct wake_q_head *wq_sleeper); ++ ++extern void rt_mutex_postunlock(struct wake_q_head *wake_q, ++ struct wake_q_head *wq_sleeper); ++ ++/* RW semaphore special interface */ ++struct ww_acquire_ctx; ++ ++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, ++ struct hrtimer_sleeper *timeout, ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx, ++ struct rt_mutex_waiter *waiter); + + #ifdef CONFIG_DEBUG_RT_MUTEXES + # include "rtmutex-debug.h" +diff -Nur linux-4.9.28.orig/kernel/locking/rtmutex-debug.c linux-4.9.28/kernel/locking/rtmutex-debug.c +--- linux-4.9.28.orig/kernel/locking/rtmutex-debug.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/locking/rtmutex-debug.c 2017-05-19 03:37:25.186176955 +0200 +@@ -173,12 +173,3 @@ + lock->name = name; + } + +-void +-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) +-{ +-} +- +-void rt_mutex_deadlock_account_unlock(struct task_struct *task) +-{ +-} +- +diff -Nur linux-4.9.28.orig/kernel/locking/rtmutex-debug.h linux-4.9.28/kernel/locking/rtmutex-debug.h +--- linux-4.9.28.orig/kernel/locking/rtmutex-debug.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/locking/rtmutex-debug.h 2017-05-19 03:37:25.186176955 +0200 +@@ -9,9 +9,6 @@ + * This file contains macros used solely by rtmutex.c. Debug version. 
+ */
+
+-extern void
+-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
+-extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
+ extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+ extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
+ extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
+diff -Nur linux-4.9.28.orig/kernel/locking/rtmutex.h linux-4.9.28/kernel/locking/rtmutex.h
+--- linux-4.9.28.orig/kernel/locking/rtmutex.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/kernel/locking/rtmutex.h 2017-05-19 03:37:25.186176955 +0200
+@@ -11,8 +11,6 @@
+ */
+
+ #define rt_mutex_deadlock_check(l) (0)
+-#define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
+-#define rt_mutex_deadlock_account_unlock(l) do { } while (0)
+ #define debug_rt_mutex_init_waiter(w) do { } while (0)
+ #define debug_rt_mutex_free_waiter(w) do { } while (0)
+ #define debug_rt_mutex_lock(l) do { } while (0)
+diff -Nur linux-4.9.28.orig/kernel/locking/rwsem-rt.c linux-4.9.28/kernel/locking/rwsem-rt.c
+--- linux-4.9.28.orig/kernel/locking/rwsem-rt.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.9.28/kernel/locking/rwsem-rt.c 2017-05-19 03:37:25.186176955 +0200
+@@ -0,0 +1,268 @@
++/*
++ */
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/export.h>
++
++#include "rtmutex_common.h"
++
++/*
++ * RT-specific reader/writer semaphores
++ *
++ * down_write()
++ * 1) Lock sem->rtmutex
++ * 2) Remove the reader BIAS to force readers into the slow path
++ * 3) Wait until all readers have left the critical region
++ * 4) Mark it write locked
++ *
++ * up_write()
++ * 1) Remove the write locked marker
++ * 2) Set the reader BIAS so readers can use the fast path again
++ * 3) Unlock sem->rtmutex to release blocked readers
++ *
++ * down_read()
++ * 1) Try fast path acquisition (reader BIAS is set)
++ * 2) Take sem->rtmutex.wait_lock which protects the writelocked flag
++ * 3) If !writelocked, acquire it for read
++ * 4) If writelocked, block on sem->rtmutex
++ * 5) unlock sem->rtmutex, goto 1)
++ *
++ * up_read()
++ * 1) Try fast path release (reader count != 1)
++ * 2) Wake the writer waiting in down_write()#3
++ *
++ * down_read()#3 has the consequence that rw semaphores on RT are not writer
++ * fair, but writers, which should be avoided in RT tasks (think mmap_sem),
++ * are subject to the rtmutex priority/DL inheritance mechanism.
++ *
++ * It's possible to make the rw semaphores writer fair by keeping a list of
++ * active readers. A blocked writer would force all newly incoming readers to
++ * block on the rtmutex, but the rtmutex would have to be proxy locked for one
++ * reader after the other. We can't use multi-reader inheritance because there
++ * is no way to support that with SCHED_DEADLINE. Implementing the one by one
++ * reader boosting/handover mechanism is major surgery for a very dubious
++ * value.
++ *
++ * The risk of writer starvation is there, but the pathological use cases
++ * which trigger it are not necessarily the typical RT workloads.
++ */
++
++void __rwsem_init(struct rw_semaphore *sem, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held semaphore:
++ */
++ debug_check_no_locks_freed((void *)sem, sizeof(*sem));
++ lockdep_init_map(&sem->dep_map, name, key, 0);
++#endif
++ atomic_set(&sem->readers, READER_BIAS);
++}
++EXPORT_SYMBOL(__rwsem_init);
++
++int __down_read_trylock(struct rw_semaphore *sem)
++{
++ int r, old;
++
++ /*
++ * Increment the reader count if sem->readers < 0, i.e. READER_BIAS is
++ * set.
++ */
++ for (r = atomic_read(&sem->readers); r < 0;) {
++ old = atomic_cmpxchg(&sem->readers, r, r + 1);
++ if (likely(old == r))
++ return 1;
++ r = old;
++ }
++ return 0;
++}
++
++void __sched __down_read(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ struct rt_mutex_waiter waiter;
++
++ if (__down_read_trylock(sem))
++ return;
++
++ might_sleep();
++ raw_spin_lock_irq(&m->wait_lock);
++ /*
++ * Allow readers as long as the writer has not completely
++ * acquired the semaphore for write.
++ */
++ if (atomic_read(&sem->readers) != WRITER_BIAS) {
++ atomic_inc(&sem->readers);
++ raw_spin_unlock_irq(&m->wait_lock);
++ return;
++ }
++
++ /*
++ * Call into the slow lock path with the rtmutex->wait_lock
++ * held, so this can't result in the following race:
++ *
++ * Reader1 Reader2 Writer
++ * down_read()
++ * down_write()
++ * rtmutex_lock(m)
++ * swait()
++ * down_read()
++ * unlock(m->wait_lock)
++ * up_read()
++ * swake()
++ * lock(m->wait_lock)
++ * sem->writelocked=true
++ * unlock(m->wait_lock)
++ *
++ * up_write()
++ * sem->writelocked=false
++ * rtmutex_unlock(m)
++ * down_read()
++ * down_write()
++ * rtmutex_lock(m)
++ * swait()
++ * rtmutex_lock(m)
++ *
++ * That would put Reader1 behind the writer waiting on
++ * Reader2 to call up_read() which might be unbound.
++ */
++ rt_mutex_init_waiter(&waiter, false);
++ rt_mutex_slowlock_locked(m, TASK_UNINTERRUPTIBLE, NULL,
++ RT_MUTEX_MIN_CHAINWALK, NULL,
++ &waiter);
++ /*
++ * The slowlock() above is guaranteed to return with the rtmutex
++ * now held, so there can't be a writer active. Increment the reader
++ * count and immediately drop the rtmutex again.
++ */
++ atomic_inc(&sem->readers);
++ raw_spin_unlock_irq(&m->wait_lock);
++ rt_mutex_unlock(m);
++
++ debug_rt_mutex_free_waiter(&waiter);
++}
++
++void __up_read(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ struct task_struct *tsk;
++
++ /*
++ * sem->readers can only hit 0 when a writer is waiting for the
++ * active readers to leave the critical region.
++ */
++ if (!atomic_dec_and_test(&sem->readers))
++ return;
++
++ might_sleep();
++ raw_spin_lock_irq(&m->wait_lock);
++ /*
++ * Wake the writer, i.e. the rtmutex owner. It might release the
++ * rtmutex concurrently in the fast path (due to a signal), but to
++ * clean up the rwsem it needs to acquire m->wait_lock. The worst
++ * case which can happen is a spurious wakeup.
++ */ ++ tsk = rt_mutex_owner(m); ++ if (tsk) ++ wake_up_process(tsk); ++ ++ raw_spin_unlock_irq(&m->wait_lock); ++} ++ ++static void __up_write_unlock(struct rw_semaphore *sem, int bias, ++ unsigned long flags) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ ++ atomic_add(READER_BIAS - bias, &sem->readers); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ rt_mutex_unlock(m); ++} ++ ++static int __sched __down_write_common(struct rw_semaphore *sem, int state) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ unsigned long flags; ++ ++ /* Take the rtmutex as a first step */ ++ if (rt_mutex_lock_state(m, state)) ++ return -EINTR; ++ ++ /* Force readers into slow path */ ++ atomic_sub(READER_BIAS, &sem->readers); ++ might_sleep(); ++ ++ set_current_state(state); ++ for (;;) { ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ /* Have all readers left the critical region? */ ++ if (!atomic_read(&sem->readers)) { ++ atomic_set(&sem->readers, WRITER_BIAS); ++ __set_current_state(TASK_RUNNING); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ return 0; ++ } ++ ++ if (signal_pending_state(state, current)) { ++ __set_current_state(TASK_RUNNING); ++ __up_write_unlock(sem, 0, flags); ++ return -EINTR; ++ } ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ ++ if (atomic_read(&sem->readers) != 0) { ++ schedule(); ++ set_current_state(state); ++ } ++ } ++} ++ ++void __sched __down_write(struct rw_semaphore *sem) ++{ ++ __down_write_common(sem, TASK_UNINTERRUPTIBLE); ++} ++ ++int __sched __down_write_killable(struct rw_semaphore *sem) ++{ ++ return __down_write_common(sem, TASK_KILLABLE); ++} ++ ++int __down_write_trylock(struct rw_semaphore *sem) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ unsigned long flags; ++ ++ if (!rt_mutex_trylock(m)) ++ return 0; ++ ++ atomic_sub(READER_BIAS, &sem->readers); ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ if (!atomic_read(&sem->readers)) { ++ atomic_set(&sem->readers, WRITER_BIAS); ++ raw_spin_unlock_irqrestore(&m->wait_lock, flags); ++ return 1; ++ } ++ __up_write_unlock(sem, 0, flags); ++ return 0; ++} ++ ++void __up_write(struct rw_semaphore *sem) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ __up_write_unlock(sem, WRITER_BIAS, flags); ++} ++ ++void __downgrade_write(struct rw_semaphore *sem) ++{ ++ struct rt_mutex *m = &sem->rtmutex; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&m->wait_lock, flags); ++ /* Release it and account current as reader */ ++ __up_write_unlock(sem, WRITER_BIAS - 1, flags); ++} +diff -Nur linux-4.9.28.orig/kernel/locking/spinlock.c linux-4.9.28/kernel/locking/spinlock.c +--- linux-4.9.28.orig/kernel/locking/spinlock.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/locking/spinlock.c 2017-05-19 03:37:25.186176955 +0200 +@@ -124,8 +124,11 @@ + * __[spin|read|write]_lock_bh() + */ + BUILD_LOCK_OPS(spin, raw_spinlock); ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + BUILD_LOCK_OPS(read, rwlock); + BUILD_LOCK_OPS(write, rwlock); ++#endif + + #endif + +@@ -209,6 +212,8 @@ + EXPORT_SYMBOL(_raw_spin_unlock_bh); + #endif + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #ifndef CONFIG_INLINE_READ_TRYLOCK + int __lockfunc _raw_read_trylock(rwlock_t *lock) + { +@@ -353,6 +358,8 @@ + EXPORT_SYMBOL(_raw_write_unlock_bh); + #endif + ++#endif /* !PREEMPT_RT_FULL */ ++ + #ifdef CONFIG_DEBUG_LOCK_ALLOC + + void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) +diff -Nur linux-4.9.28.orig/kernel/locking/spinlock_debug.c 
linux-4.9.28/kernel/locking/spinlock_debug.c +--- linux-4.9.28.orig/kernel/locking/spinlock_debug.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/locking/spinlock_debug.c 2017-05-19 03:37:25.190177110 +0200 +@@ -31,6 +31,7 @@ + + EXPORT_SYMBOL(__raw_spin_lock_init); + ++#ifndef CONFIG_PREEMPT_RT_FULL + void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key) + { +@@ -48,6 +49,7 @@ + } + + EXPORT_SYMBOL(__rwlock_init); ++#endif + + static void spin_dump(raw_spinlock_t *lock, const char *msg) + { +@@ -159,6 +161,7 @@ + arch_spin_unlock(&lock->raw_lock); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + static void rwlock_bug(rwlock_t *lock, const char *msg) + { + if (!debug_locks_off()) +@@ -300,3 +303,5 @@ + debug_write_unlock(lock); + arch_write_unlock(&lock->raw_lock); + } ++ ++#endif +diff -Nur linux-4.9.28.orig/kernel/module.c linux-4.9.28/kernel/module.c +--- linux-4.9.28.orig/kernel/module.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/module.c 2017-05-19 03:37:25.190177110 +0200 +@@ -660,16 +660,7 @@ + memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); + } + +-/** +- * is_module_percpu_address - test whether address is from module static percpu +- * @addr: address to test +- * +- * Test whether @addr belongs to module static percpu area. +- * +- * RETURNS: +- * %true if @addr is from module static percpu area +- */ +-bool is_module_percpu_address(unsigned long addr) ++bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) + { + struct module *mod; + unsigned int cpu; +@@ -683,9 +674,15 @@ + continue; + for_each_possible_cpu(cpu) { + void *start = per_cpu_ptr(mod->percpu, cpu); ++ void *va = (void *)addr; + +- if ((void *)addr >= start && +- (void *)addr < start + mod->percpu_size) { ++ if (va >= start && va < start + mod->percpu_size) { ++ if (can_addr) { ++ *can_addr = (unsigned long) (va - start); ++ *can_addr += (unsigned long) ++ per_cpu_ptr(mod->percpu, ++ get_boot_cpu_id()); ++ } + preempt_enable(); + return true; + } +@@ -696,6 +693,20 @@ + return false; + } + ++/** ++ * is_module_percpu_address - test whether address is from module static percpu ++ * @addr: address to test ++ * ++ * Test whether @addr belongs to module static percpu area. ++ * ++ * RETURNS: ++ * %true if @addr is from module static percpu area ++ */ ++bool is_module_percpu_address(unsigned long addr) ++{ ++ return __is_module_percpu_address(addr, NULL); ++} ++ + #else /* ... 
!CONFIG_SMP */ + + static inline void __percpu *mod_percpu(struct module *mod) +@@ -726,6 +737,11 @@ + { + return false; + } ++ ++bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) ++{ ++ return false; ++} + + #endif /* CONFIG_SMP */ + +diff -Nur linux-4.9.28.orig/kernel/panic.c linux-4.9.28/kernel/panic.c +--- linux-4.9.28.orig/kernel/panic.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/panic.c 2017-05-19 03:37:25.190177110 +0200 +@@ -482,9 +482,11 @@ + + static int init_oops_id(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!oops_id) + get_random_bytes(&oops_id, sizeof(oops_id)); + else ++#endif + oops_id++; + + return 0; +diff -Nur linux-4.9.28.orig/kernel/power/hibernate.c linux-4.9.28/kernel/power/hibernate.c +--- linux-4.9.28.orig/kernel/power/hibernate.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/power/hibernate.c 2017-05-19 03:37:25.190177110 +0200 +@@ -286,6 +286,8 @@ + + local_irq_disable(); + ++ system_state = SYSTEM_SUSPEND; ++ + error = syscore_suspend(); + if (error) { + printk(KERN_ERR "PM: Some system devices failed to power down, " +@@ -317,6 +319,7 @@ + syscore_resume(); + + Enable_irqs: ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -446,6 +449,7 @@ + goto Enable_cpus; + + local_irq_disable(); ++ system_state = SYSTEM_SUSPEND; + + error = syscore_suspend(); + if (error) +@@ -479,6 +483,7 @@ + syscore_resume(); + + Enable_irqs: ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -564,6 +569,7 @@ + goto Enable_cpus; + + local_irq_disable(); ++ system_state = SYSTEM_SUSPEND; + syscore_suspend(); + if (pm_wakeup_pending()) { + error = -EAGAIN; +@@ -576,6 +582,7 @@ + + Power_up: + syscore_resume(); ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -676,6 +683,10 @@ + return error; + } + ++#ifndef CONFIG_SUSPEND ++bool pm_in_action; ++#endif ++ + /** + * hibernate - Carry out system hibernation, including saving the image. + */ +@@ -689,6 +700,8 @@ + return -EPERM; + } + ++ pm_in_action = true; ++ + lock_system_sleep(); + /* The snapshot device should not be opened while we're running */ + if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { +@@ -766,6 +779,7 @@ + atomic_inc(&snapshot_device_available); + Unlock: + unlock_system_sleep(); ++ pm_in_action = false; + return error; + } + +diff -Nur linux-4.9.28.orig/kernel/power/suspend.c linux-4.9.28/kernel/power/suspend.c +--- linux-4.9.28.orig/kernel/power/suspend.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/power/suspend.c 2017-05-19 03:37:25.190177110 +0200 +@@ -369,6 +369,8 @@ + arch_suspend_disable_irqs(); + BUG_ON(!irqs_disabled()); + ++ system_state = SYSTEM_SUSPEND; ++ + error = syscore_suspend(); + if (!error) { + *wakeup = pm_wakeup_pending(); +@@ -385,6 +387,8 @@ + syscore_resume(); + } + ++ system_state = SYSTEM_RUNNING; ++ + arch_suspend_enable_irqs(); + BUG_ON(irqs_disabled()); + +@@ -527,6 +531,8 @@ + return error; + } + ++bool pm_in_action; ++ + /** + * pm_suspend - Externally visible function for suspending the system. + * @state: System sleep state to enter. 
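Every hunk above that touches the hibernate and suspend entry points applies the same bracket: system_state is set to SYSTEM_SUSPEND around the IRQs-off syscore_suspend()/syscore_resume() section and restored to SYSTEM_RUNNING on every exit path, so code running with interrupts disabled can tell it is on the suspend path. A minimal standalone sketch of that bracketing; the enum values and the syscore stubs here are mocked stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Mocked stand-ins: the real enum system_states lives in
 * include/linux/kernel.h, and SYSTEM_SUSPEND is introduced by this
 * patch set, not by vanilla 4.9. */
enum system_states { SYSTEM_RUNNING, SYSTEM_SUSPEND };
static enum system_states system_state = SYSTEM_RUNNING;

static int syscore_suspend(void) { return 0; }  /* mocked */
static void syscore_resume(void) { }            /* mocked */

/* The bracketing pattern the hunks add around every syscore
 * suspend/resume pair: mark the IRQs-off window, restore the marker
 * on both the success and the failure path. */
static int suspend_core_section(void)
{
	int error;

	system_state = SYSTEM_SUSPEND;
	error = syscore_suspend();
	if (!error) {
		/* ... the platform enters the sleep state here ... */
		syscore_resume();
	}
	system_state = SYSTEM_RUNNING;
	return error;
}

int main(void)
{
	printf("suspend core section: %d\n", suspend_core_section());
	return 0;
}

The pm_in_action flag added below is the same idea one level up: set when pm_suspend() or hibernate() enters, cleared on every return.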
+@@ -541,6 +547,8 @@ + if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) + return -EINVAL; + ++ pm_in_action = true; ++ + error = enter_state(state); + if (error) { + suspend_stats.fail++; +@@ -548,6 +556,7 @@ + } else { + suspend_stats.success++; + } ++ pm_in_action = false; + return error; + } + EXPORT_SYMBOL(pm_suspend); +diff -Nur linux-4.9.28.orig/kernel/printk/printk.c linux-4.9.28/kernel/printk/printk.c +--- linux-4.9.28.orig/kernel/printk/printk.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/printk/printk.c 2017-05-19 03:37:25.190177110 +0200 +@@ -351,6 +351,65 @@ + */ + DEFINE_RAW_SPINLOCK(logbuf_lock); + ++#ifdef CONFIG_EARLY_PRINTK ++struct console *early_console; ++ ++static void early_vprintk(const char *fmt, va_list ap) ++{ ++ if (early_console) { ++ char buf[512]; ++ int n = vscnprintf(buf, sizeof(buf), fmt, ap); ++ ++ early_console->write(early_console, buf, n); ++ } ++} ++ ++asmlinkage void early_printk(const char *fmt, ...) ++{ ++ va_list ap; ++ ++ va_start(ap, fmt); ++ early_vprintk(fmt, ap); ++ va_end(ap); ++} ++ ++/* ++ * This is independent of any log levels - a global ++ * kill switch that turns off all of printk. ++ * ++ * Used by the NMI watchdog if early-printk is enabled. ++ */ ++static bool __read_mostly printk_killswitch; ++ ++static int __init force_early_printk_setup(char *str) ++{ ++ printk_killswitch = true; ++ return 0; ++} ++early_param("force_early_printk", force_early_printk_setup); ++ ++void printk_kill(void) ++{ ++ printk_killswitch = true; ++} ++ ++#ifdef CONFIG_PRINTK ++static int forced_early_printk(const char *fmt, va_list ap) ++{ ++ if (!printk_killswitch) ++ return 0; ++ early_vprintk(fmt, ap); ++ return 1; ++} ++#endif ++ ++#else ++static inline int forced_early_printk(const char *fmt, va_list ap) ++{ ++ return 0; ++} ++#endif ++ + #ifdef CONFIG_PRINTK + DECLARE_WAIT_QUEUE_HEAD(log_wait); + /* the next printk record to read by syslog(READ) or /proc/kmsg */ +@@ -1337,6 +1396,7 @@ + { + char *text; + int len = 0; ++ int attempts = 0; + + text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); + if (!text) +@@ -1348,6 +1408,14 @@ + u64 seq; + u32 idx; + enum log_flags prev; ++ int num_msg; ++try_again: ++ attempts++; ++ if (attempts > 10) { ++ len = -EBUSY; ++ goto out; ++ } ++ num_msg = 0; + + /* + * Find first record that fits, including all following records, +@@ -1363,6 +1431,14 @@ + prev = msg->flags; + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ raw_spin_unlock_irq(&logbuf_lock); ++ raw_spin_lock_irq(&logbuf_lock); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* move first record forward until length fits into the buffer */ +@@ -1376,6 +1452,14 @@ + prev = msg->flags; + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ raw_spin_unlock_irq(&logbuf_lock); ++ raw_spin_lock_irq(&logbuf_lock); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* last message fitting into this dump */ +@@ -1416,6 +1500,7 @@ + clear_seq = log_next_seq; + clear_idx = log_next_idx; + } ++out: + raw_spin_unlock_irq(&logbuf_lock); + + kfree(text); +@@ -1569,6 +1654,12 @@ + if (!console_drivers) + return; + ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { ++ if (in_irq() || in_nmi()) ++ return; ++ } ++ ++ migrate_disable(); + for_each_console(con) { + if (exclusive_console && con != exclusive_console) + continue; +@@ -1584,6 +1675,7 @@ + else + con->write(con, text, len); + } ++ migrate_enable(); + } + + /* +@@ -1781,6 +1873,13 @@ + /* cpu 
currently holding logbuf_lock in this function */
+ static unsigned int logbuf_cpu = UINT_MAX;
+
++ /*
++ * Fall back to early_printk if a debugging subsystem has
++ * killed printk output
++ */
++ if (unlikely(forced_early_printk(fmt, args)))
++ return 1;
++
+ if (level == LOGLEVEL_SCHED) {
+ level = LOGLEVEL_DEFAULT;
+ in_sched = true;
+@@ -1885,13 +1984,23 @@
+
+ /* If called from the scheduler, we can not call up(). */
+ if (!in_sched) {
++ int may_trylock = 1;
++
+ lockdep_off();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * We can't take a sleeping lock with IRQs or preemption disabled,
++ * so we can't print in these contexts
++ */
++ if (!(preempt_count() == 0 && !irqs_disabled()))
++ may_trylock = 0;
++#endif
+ /*
+ * Try to acquire and then immediately release the console
+ * semaphore. The release will print out buffers and wake up
+ * /dev/kmsg and syslog() users.
+ */
+- if (console_trylock())
++ if (may_trylock && console_trylock())
+ console_unlock();
+ lockdep_on();
+ }
+@@ -2014,26 +2123,6 @@
+
+ #endif /* CONFIG_PRINTK */
+
+-#ifdef CONFIG_EARLY_PRINTK
+-struct console *early_console;
+-
+-asmlinkage __visible void early_printk(const char *fmt, ...)
+-{
+- va_list ap;
+- char buf[512];
+- int n;
+-
+- if (!early_console)
+- return;
+-
+- va_start(ap, fmt);
+- n = vscnprintf(buf, sizeof(buf), fmt, ap);
+- va_end(ap);
+-
+- early_console->write(early_console, buf, n);
+-}
+-#endif
+-
+ static int __add_preferred_console(char *name, int idx, char *options,
+ char *brl_options)
+ {
+@@ -2303,11 +2392,16 @@
+ goto out;
+
+ len = cont_print_text(text, size);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(cont.level, NULL, 0, text, len);
++#else
+ raw_spin_unlock(&logbuf_lock);
+ stop_critical_timings();
+ call_console_drivers(cont.level, NULL, 0, text, len);
+ start_critical_timings();
+ local_irq_restore(flags);
++#endif
+ return;
+ out:
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+@@ -2431,13 +2525,17 @@
+ console_idx = log_next(console_idx);
+ console_seq++;
+ console_prev = msg->flags;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(level, ext_text, ext_len, text, len);
++#else
+ raw_spin_unlock(&logbuf_lock);
+
+ stop_critical_timings(); /* don't trace print latency */
+ call_console_drivers(level, ext_text, ext_len, text, len);
+ start_critical_timings();
+ local_irq_restore(flags);
+-
++#endif
+ if (do_cond_resched)
+ cond_resched();
+ }
+@@ -2489,6 +2587,11 @@
+ {
+ struct console *c;
+
++ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
++ if (in_irq() || in_nmi())
++ return;
++ }
++
+ /*
+ * console_unblank can no longer be called in interrupt context unless
+ * oops_in_progress is set to 1..
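The printk.c changes above funnel everything through an optional killswitch: once printk_kill() has run, or force_early_printk is given on the command line, vprintk_emit() bypasses the log buffer and console semaphore and writes synchronously through the early console's write() hook, which matters when the machine is dying and sleeping locks are off limits. A compilable toy model of just that routing; the console struct and all names here are simplified stand-ins for the kernel's:

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's struct console; only the write() hook matters. */
struct console {
	void (*write)(struct console *con, const char *s, unsigned int n);
};

static void serial_write(struct console *con, const char *s, unsigned int n)
{
	fwrite(s, 1, n, stderr);	/* pretend this is a polled UART */
}

static struct console serial = { .write = serial_write };
static struct console *early_console = &serial;

/* Set by printk_kill() or the force_early_printk boot parameter. */
static bool printk_killswitch;

static void early_vprintk(const char *fmt, va_list ap)
{
	char buf[512];
	int n = vsnprintf(buf, sizeof(buf), fmt, ap);

	if (n > (int)sizeof(buf) - 1)
		n = sizeof(buf) - 1;
	if (early_console && n > 0)
		early_console->write(early_console, buf, n);
}

/* Mirrors the routing added to vprintk_emit(): with the killswitch set,
 * output skips the buffered printk machinery entirely. */
static int model_printk(const char *fmt, ...)
{
	va_list ap;
	int handled = 0;

	va_start(ap, fmt);
	if (printk_killswitch) {
		early_vprintk(fmt, ap);
		handled = 1;
	}
	/* else: the normal buffered printk path would run here */
	va_end(ap);
	return handled;
}

int main(void)
{
	printk_killswitch = true;	/* what printk_kill() does */
	model_printk("watchdog: %s\n", "synchronous early-console output");
	return 0;
}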
+diff -Nur linux-4.9.28.orig/kernel/ptrace.c linux-4.9.28/kernel/ptrace.c +--- linux-4.9.28.orig/kernel/ptrace.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/ptrace.c 2017-05-19 03:37:25.190177110 +0200 +@@ -166,7 +166,14 @@ + + spin_lock_irq(&task->sighand->siglock); + if (task_is_traced(task) && !__fatal_signal_pending(task)) { +- task->state = __TASK_TRACED; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&task->pi_lock, flags); ++ if (task->state & __TASK_TRACED) ++ task->state = __TASK_TRACED; ++ else ++ task->saved_state = __TASK_TRACED; ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); + ret = true; + } + spin_unlock_irq(&task->sighand->siglock); +diff -Nur linux-4.9.28.orig/kernel/rcu/rcutorture.c linux-4.9.28/kernel/rcu/rcutorture.c +--- linux-4.9.28.orig/kernel/rcu/rcutorture.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/rcu/rcutorture.c 2017-05-19 03:37:25.190177110 +0200 +@@ -404,6 +404,7 @@ + .name = "rcu" + }; + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Definitions for rcu_bh torture testing. + */ +@@ -443,6 +444,12 @@ + .name = "rcu_bh" + }; + ++#else ++static struct rcu_torture_ops rcu_bh_ops = { ++ .ttype = INVALID_RCU_FLAVOR, ++}; ++#endif ++ + /* + * Don't even think about trying any of these in real life!!! + * The names includes "busted", and they really means it! +diff -Nur linux-4.9.28.orig/kernel/rcu/tree.c linux-4.9.28/kernel/rcu/tree.c +--- linux-4.9.28.orig/kernel/rcu/tree.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/rcu/tree.c 2017-05-19 03:37:25.190177110 +0200 +@@ -55,6 +55,11 @@ + #include <linux/random.h> + #include <linux/trace_events.h> + #include <linux/suspend.h> ++#include <linux/delay.h> ++#include <linux/gfp.h> ++#include <linux/oom.h> ++#include <linux/smpboot.h> ++#include "../time/tick-internal.h" + + #include "tree.h" + #include "rcu.h" +@@ -260,6 +265,19 @@ + this_cpu_ptr(&rcu_sched_data), true); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void rcu_preempt_qs(void); ++ ++void rcu_bh_qs(void) ++{ ++ unsigned long flags; ++ ++ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */ ++ local_irq_save(flags); ++ rcu_preempt_qs(); ++ local_irq_restore(flags); ++} ++#else + void rcu_bh_qs(void) + { + if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { +@@ -269,6 +287,7 @@ + __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); + } + } ++#endif + + static DEFINE_PER_CPU(int, rcu_sched_qs_mask); + +@@ -449,11 +468,13 @@ + /* + * Return the number of RCU BH batches started thus far for debug & stats. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + unsigned long rcu_batches_started_bh(void) + { + return rcu_bh_state.gpnum; + } + EXPORT_SYMBOL_GPL(rcu_batches_started_bh); ++#endif + + /* + * Return the number of RCU batches completed thus far for debug & stats. +@@ -473,6 +494,7 @@ + } + EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Return the number of RCU BH batches completed thus far for debug & stats. + */ +@@ -481,6 +503,7 @@ + return rcu_bh_state.completed; + } + EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); ++#endif + + /* + * Return the number of RCU expedited batches completed thus far for +@@ -504,6 +527,7 @@ + } + EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Force a quiescent state. 
+ */ +@@ -522,6 +546,13 @@ + } + EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); + ++#else ++void rcu_force_quiescent_state(void) ++{ ++} ++EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); ++#endif ++ + /* + * Force a quiescent state for RCU-sched. + */ +@@ -572,9 +603,11 @@ + case RCU_FLAVOR: + rsp = rcu_state_p; + break; ++#ifndef CONFIG_PREEMPT_RT_FULL + case RCU_BH_FLAVOR: + rsp = &rcu_bh_state; + break; ++#endif + case RCU_SCHED_FLAVOR: + rsp = &rcu_sched_state; + break; +@@ -3016,18 +3049,17 @@ + /* + * Do RCU core processing for the current CPU. + */ +-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) ++static __latent_entropy void rcu_process_callbacks(void) + { + struct rcu_state *rsp; + + if (cpu_is_offline(smp_processor_id())) + return; +- trace_rcu_utilization(TPS("Start RCU core")); + for_each_rcu_flavor(rsp) + __rcu_process_callbacks(rsp); +- trace_rcu_utilization(TPS("End RCU core")); + } + ++static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); + /* + * Schedule RCU callback invocation. If the specified type of RCU + * does not support RCU priority boosting, just do a direct call, +@@ -3039,18 +3071,105 @@ + { + if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) + return; +- if (likely(!rsp->boost)) { +- rcu_do_batch(rsp, rdp); ++ rcu_do_batch(rsp, rdp); ++} ++ ++static void rcu_wake_cond(struct task_struct *t, int status) ++{ ++ /* ++ * If the thread is yielding, only wake it when this ++ * is invoked from idle ++ */ ++ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) ++ wake_up_process(t); ++} ++ ++/* ++ * Wake up this CPU's rcuc kthread to do RCU core processing. ++ */ ++static void invoke_rcu_core(void) ++{ ++ unsigned long flags; ++ struct task_struct *t; ++ ++ if (!cpu_online(smp_processor_id())) + return; ++ local_irq_save(flags); ++ __this_cpu_write(rcu_cpu_has_work, 1); ++ t = __this_cpu_read(rcu_cpu_kthread_task); ++ if (t != NULL && current != t) ++ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); ++ local_irq_restore(flags); ++} ++ ++static void rcu_cpu_kthread_park(unsigned int cpu) ++{ ++ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; ++} ++ ++static int rcu_cpu_kthread_should_run(unsigned int cpu) ++{ ++ return __this_cpu_read(rcu_cpu_has_work); ++} ++ ++/* ++ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the ++ * RCU softirq used in flavors and configurations of RCU that do not ++ * support RCU priority boosting. 
++ */ ++static void rcu_cpu_kthread(unsigned int cpu) ++{ ++ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); ++ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); ++ int spincnt; ++ ++ for (spincnt = 0; spincnt < 10; spincnt++) { ++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); ++ local_bh_disable(); ++ *statusp = RCU_KTHREAD_RUNNING; ++ this_cpu_inc(rcu_cpu_kthread_loops); ++ local_irq_disable(); ++ work = *workp; ++ *workp = 0; ++ local_irq_enable(); ++ if (work) ++ rcu_process_callbacks(); ++ local_bh_enable(); ++ if (*workp == 0) { ++ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); ++ *statusp = RCU_KTHREAD_WAITING; ++ return; ++ } + } +- invoke_rcu_callbacks_kthread(); ++ *statusp = RCU_KTHREAD_YIELDING; ++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); ++ schedule_timeout_interruptible(2); ++ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); ++ *statusp = RCU_KTHREAD_WAITING; + } + +-static void invoke_rcu_core(void) ++static struct smp_hotplug_thread rcu_cpu_thread_spec = { ++ .store = &rcu_cpu_kthread_task, ++ .thread_should_run = rcu_cpu_kthread_should_run, ++ .thread_fn = rcu_cpu_kthread, ++ .thread_comm = "rcuc/%u", ++ .setup = rcu_cpu_kthread_setup, ++ .park = rcu_cpu_kthread_park, ++}; ++ ++/* ++ * Spawn per-CPU RCU core processing kthreads. ++ */ ++static int __init rcu_spawn_core_kthreads(void) + { +- if (cpu_online(smp_processor_id())) +- raise_softirq(RCU_SOFTIRQ); ++ int cpu; ++ ++ for_each_possible_cpu(cpu) ++ per_cpu(rcu_cpu_has_work, cpu) = 0; ++ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); ++ return 0; + } ++early_initcall(rcu_spawn_core_kthreads); + + /* + * Handle any core-RCU processing required by a call_rcu() invocation. +@@ -3195,6 +3314,7 @@ + } + EXPORT_SYMBOL_GPL(call_rcu_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Queue an RCU callback for invocation after a quicker grace period. + */ +@@ -3203,6 +3323,7 @@ + __call_rcu(head, func, &rcu_bh_state, -1, 0); + } + EXPORT_SYMBOL_GPL(call_rcu_bh); ++#endif + + /* + * Queue an RCU callback for lazy invocation after a grace period. +@@ -3294,6 +3415,7 @@ + } + EXPORT_SYMBOL_GPL(synchronize_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. + * +@@ -3320,6 +3442,7 @@ + wait_rcu_gp(call_rcu_bh); + } + EXPORT_SYMBOL_GPL(synchronize_rcu_bh); ++#endif + + /** + * get_state_synchronize_rcu - Snapshot current RCU state +@@ -3698,6 +3821,7 @@ + mutex_unlock(&rsp->barrier_mutex); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. + */ +@@ -3706,6 +3830,7 @@ + _rcu_barrier(&rcu_bh_state); + } + EXPORT_SYMBOL_GPL(rcu_barrier_bh); ++#endif + + /** + * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. 
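For reference, the rcuc registration above is the generic smpboot pattern: fill in a struct smp_hotplug_thread and smpboot_register_percpu_thread() creates one kthread per possible CPU that is parked and unparked across hotplug. A stripped-down, module-style sketch of the same shape, following exactly the fields used by rcu_cpu_thread_spec; the demo_* names and the per-CPU work flag are illustrative stand-ins, not kernel symbols:

#include <linux/smpboot.h>
#include <linux/percpu.h>
#include <linux/init.h>

static DEFINE_PER_CPU(struct task_struct *, demo_kthread_task);
static DEFINE_PER_CPU(char, demo_has_work);

static int demo_thread_should_run(unsigned int cpu)
{
	return __this_cpu_read(demo_has_work);
}

static void demo_thread_fn(unsigned int cpu)
{
	/* Runs in per-CPU kthread context whenever should_run() said yes. */
	__this_cpu_write(demo_has_work, 0);
	/* ... drain this CPU's deferred work here ... */
}

static struct smp_hotplug_thread demo_thread_spec = {
	.store			= &demo_kthread_task,
	.thread_should_run	= demo_thread_should_run,
	.thread_fn		= demo_thread_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_spawn_threads(void)
{
	/* One kthread per possible CPU, parked until its CPU is online. */
	return smpboot_register_percpu_thread(&demo_thread_spec);
}
early_initcall(demo_spawn_threads);

The payoff for RT is that such threads are ordinary SCHED_FIFO-capable kthreads, so callback processing can be prioritized, preempted and migrated, none of which is possible for the raised-softirq path this replaces.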
+@@ -4227,12 +4352,13 @@ + + rcu_bootup_announce(); + rcu_init_geometry(); ++#ifndef CONFIG_PREEMPT_RT_FULL + rcu_init_one(&rcu_bh_state); ++#endif + rcu_init_one(&rcu_sched_state); + if (dump_tree) + rcu_dump_rcu_node_tree(&rcu_sched_state); + __rcu_init_preempt(); +- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); + + /* + * We don't need protection against CPU-hotplug here because +diff -Nur linux-4.9.28.orig/kernel/rcu/tree.h linux-4.9.28/kernel/rcu/tree.h +--- linux-4.9.28.orig/kernel/rcu/tree.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/rcu/tree.h 2017-05-19 03:37:25.190177110 +0200 +@@ -588,18 +588,18 @@ + */ + extern struct rcu_state rcu_sched_state; + ++#ifndef CONFIG_PREEMPT_RT_FULL + extern struct rcu_state rcu_bh_state; ++#endif + + #ifdef CONFIG_PREEMPT_RCU + extern struct rcu_state rcu_preempt_state; + #endif /* #ifdef CONFIG_PREEMPT_RCU */ + +-#ifdef CONFIG_RCU_BOOST + DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); + DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); + DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); + DECLARE_PER_CPU(char, rcu_cpu_has_work); +-#endif /* #ifdef CONFIG_RCU_BOOST */ + + #ifndef RCU_TREE_NONCORE + +@@ -619,10 +619,9 @@ + static void __init __rcu_init_preempt(void); + static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); + static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); +-static void invoke_rcu_callbacks_kthread(void); + static bool rcu_is_callbacks_kthread(void); ++static void rcu_cpu_kthread_setup(unsigned int cpu); + #ifdef CONFIG_RCU_BOOST +-static void rcu_preempt_do_callbacks(void); + static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, + struct rcu_node *rnp); + #endif /* #ifdef CONFIG_RCU_BOOST */ +diff -Nur linux-4.9.28.orig/kernel/rcu/tree_plugin.h linux-4.9.28/kernel/rcu/tree_plugin.h +--- linux-4.9.28.orig/kernel/rcu/tree_plugin.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/rcu/tree_plugin.h 2017-05-19 03:37:25.190177110 +0200 +@@ -24,25 +24,10 @@ + * Paul E. McKenney <paulmck@linux.vnet.ibm.com> + */ + +-#include <linux/delay.h> +-#include <linux/gfp.h> +-#include <linux/oom.h> +-#include <linux/smpboot.h> +-#include "../time/tick-internal.h" +- + #ifdef CONFIG_RCU_BOOST + + #include "../locking/rtmutex_common.h" + +-/* +- * Control variables for per-CPU and per-rcu_node kthreads. These +- * handle all flavors of RCU. +- */ +-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); +-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); +-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); +-DEFINE_PER_CPU(char, rcu_cpu_has_work); +- + #else /* #ifdef CONFIG_RCU_BOOST */ + + /* +@@ -55,6 +40,14 @@ + + #endif /* #else #ifdef CONFIG_RCU_BOOST */ + ++/* ++ * Control variables for per-CPU and per-rcu_node kthreads. These ++ * handle all flavors of RCU. ++ */ ++DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); ++DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); ++DEFINE_PER_CPU(char, rcu_cpu_has_work); ++ + #ifdef CONFIG_RCU_NOCB_CPU + static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ + static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */ +@@ -426,7 +419,7 @@ + } + + /* Hardware IRQ handlers cannot block, complain if they get here. 
*/ +- if (in_irq() || in_serving_softirq()) { ++ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { + lockdep_rcu_suspicious(__FILE__, __LINE__, + "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); + pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", +@@ -632,15 +625,6 @@ + t->rcu_read_unlock_special.b.need_qs = true; + } + +-#ifdef CONFIG_RCU_BOOST +- +-static void rcu_preempt_do_callbacks(void) +-{ +- rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p)); +-} +- +-#endif /* #ifdef CONFIG_RCU_BOOST */ +- + /* + * Queue a preemptible-RCU callback for invocation after a grace period. + */ +@@ -829,6 +813,19 @@ + + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + ++/* ++ * If boosting, set rcuc kthreads to realtime priority. ++ */ ++static void rcu_cpu_kthread_setup(unsigned int cpu) ++{ ++#ifdef CONFIG_RCU_BOOST ++ struct sched_param sp; ++ ++ sp.sched_priority = kthread_prio; ++ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); ++#endif /* #ifdef CONFIG_RCU_BOOST */ ++} ++ + #ifdef CONFIG_RCU_BOOST + + #include "../locking/rtmutex_common.h" +@@ -860,16 +857,6 @@ + + #endif /* #else #ifdef CONFIG_RCU_TRACE */ + +-static void rcu_wake_cond(struct task_struct *t, int status) +-{ +- /* +- * If the thread is yielding, only wake it when this +- * is invoked from idle +- */ +- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) +- wake_up_process(t); +-} +- + /* + * Carry out RCU priority boosting on the task indicated by ->exp_tasks + * or ->boost_tasks, advancing the pointer to the next task in the +@@ -1013,23 +1000,6 @@ + } + + /* +- * Wake up the per-CPU kthread to invoke RCU callbacks. +- */ +-static void invoke_rcu_callbacks_kthread(void) +-{ +- unsigned long flags; +- +- local_irq_save(flags); +- __this_cpu_write(rcu_cpu_has_work, 1); +- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && +- current != __this_cpu_read(rcu_cpu_kthread_task)) { +- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), +- __this_cpu_read(rcu_cpu_kthread_status)); +- } +- local_irq_restore(flags); +-} +- +-/* + * Is the current CPU running the RCU-callbacks kthread? + * Caller must have preemption disabled. + */ +@@ -1083,67 +1053,6 @@ + return 0; + } + +-static void rcu_kthread_do_work(void) +-{ +- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); +- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); +- rcu_preempt_do_callbacks(); +-} +- +-static void rcu_cpu_kthread_setup(unsigned int cpu) +-{ +- struct sched_param sp; +- +- sp.sched_priority = kthread_prio; +- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); +-} +- +-static void rcu_cpu_kthread_park(unsigned int cpu) +-{ +- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; +-} +- +-static int rcu_cpu_kthread_should_run(unsigned int cpu) +-{ +- return __this_cpu_read(rcu_cpu_has_work); +-} +- +-/* +- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the +- * RCU softirq used in flavors and configurations of RCU that do not +- * support RCU priority boosting. 
+- */ +-static void rcu_cpu_kthread(unsigned int cpu) +-{ +- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); +- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); +- int spincnt; +- +- for (spincnt = 0; spincnt < 10; spincnt++) { +- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); +- local_bh_disable(); +- *statusp = RCU_KTHREAD_RUNNING; +- this_cpu_inc(rcu_cpu_kthread_loops); +- local_irq_disable(); +- work = *workp; +- *workp = 0; +- local_irq_enable(); +- if (work) +- rcu_kthread_do_work(); +- local_bh_enable(); +- if (*workp == 0) { +- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); +- *statusp = RCU_KTHREAD_WAITING; +- return; +- } +- } +- *statusp = RCU_KTHREAD_YIELDING; +- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); +- schedule_timeout_interruptible(2); +- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); +- *statusp = RCU_KTHREAD_WAITING; +-} +- + /* + * Set the per-rcu_node kthread's affinity to cover all CPUs that are + * served by the rcu_node in question. The CPU hotplug lock is still +@@ -1174,26 +1083,12 @@ + free_cpumask_var(cm); + } + +-static struct smp_hotplug_thread rcu_cpu_thread_spec = { +- .store = &rcu_cpu_kthread_task, +- .thread_should_run = rcu_cpu_kthread_should_run, +- .thread_fn = rcu_cpu_kthread, +- .thread_comm = "rcuc/%u", +- .setup = rcu_cpu_kthread_setup, +- .park = rcu_cpu_kthread_park, +-}; +- + /* + * Spawn boost kthreads -- called as soon as the scheduler is running. + */ + static void __init rcu_spawn_boost_kthreads(void) + { + struct rcu_node *rnp; +- int cpu; +- +- for_each_possible_cpu(cpu) +- per_cpu(rcu_cpu_has_work, cpu) = 0; +- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); + rcu_for_each_leaf_node(rcu_state_p, rnp) + (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); + } +@@ -1216,11 +1111,6 @@ + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + } + +-static void invoke_rcu_callbacks_kthread(void) +-{ +- WARN_ON_ONCE(1); +-} +- + static bool rcu_is_callbacks_kthread(void) + { + return false; +@@ -1244,7 +1134,7 @@ + + #endif /* #else #ifdef CONFIG_RCU_BOOST */ + +-#if !defined(CONFIG_RCU_FAST_NO_HZ) ++#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) + + /* + * Check to see if any future RCU-related work will need to be done +@@ -1261,7 +1151,9 @@ + return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) + ? 0 : rcu_cpu_has_callbacks(NULL); + } ++#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ + ++#if !defined(CONFIG_RCU_FAST_NO_HZ) + /* + * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up + * after it. +@@ -1357,6 +1249,8 @@ + return cbs_ready; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready + * to invoke. If the CPU has callbacks, try to advance them. Tell the +@@ -1402,6 +1296,7 @@ + *nextevt = basemono + dj * TICK_NSEC; + return 0; + } ++#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ + + /* + * Prepare a CPU for idle from an RCU perspective. 
The first major task +diff -Nur linux-4.9.28.orig/kernel/rcu/update.c linux-4.9.28/kernel/rcu/update.c +--- linux-4.9.28.orig/kernel/rcu/update.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/rcu/update.c 2017-05-19 03:37:25.190177110 +0200 +@@ -62,7 +62,7 @@ + #ifndef CONFIG_TINY_RCU + module_param(rcu_expedited, int, 0); + module_param(rcu_normal, int, 0); +-static int rcu_normal_after_boot; ++static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); + module_param(rcu_normal_after_boot, int, 0); + #endif /* #ifndef CONFIG_TINY_RCU */ + +@@ -132,8 +132,7 @@ + } + EXPORT_SYMBOL_GPL(rcu_gp_is_normal); + +-static atomic_t rcu_expedited_nesting = +- ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0); ++static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1); + + /* + * Should normal grace-period primitives be expedited? Intended for +@@ -182,8 +181,7 @@ + */ + void rcu_end_inkernel_boot(void) + { +- if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT)) +- rcu_unexpedite_gp(); ++ rcu_unexpedite_gp(); + if (rcu_normal_after_boot) + WRITE_ONCE(rcu_normal, 1); + } +@@ -298,6 +296,7 @@ + } + EXPORT_SYMBOL_GPL(rcu_read_lock_held); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? + * +@@ -324,6 +323,7 @@ + return in_softirq() || irqs_disabled(); + } + EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); ++#endif + + #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +diff -Nur linux-4.9.28.orig/kernel/sched/completion.c linux-4.9.28/kernel/sched/completion.c +--- linux-4.9.28.orig/kernel/sched/completion.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/sched/completion.c 2017-05-19 03:37:25.194177264 +0200 +@@ -30,10 +30,10 @@ + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done++; +- __wake_up_locked(&x->wait, TASK_NORMAL, 1); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ swake_up_locked(&x->wait); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete); + +@@ -50,10 +50,10 @@ + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done += UINT_MAX/2; +- __wake_up_locked(&x->wait, TASK_NORMAL, 0); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ swake_up_all_locked(&x->wait); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete_all); + +@@ -62,20 +62,20 @@ + long (*action)(long), long timeout, int state) + { + if (!x->done) { +- DECLARE_WAITQUEUE(wait, current); ++ DECLARE_SWAITQUEUE(wait); + +- __add_wait_queue_tail_exclusive(&x->wait, &wait); ++ __prepare_to_swait(&x->wait, &wait); + do { + if (signal_pending_state(state, current)) { + timeout = -ERESTARTSYS; + break; + } + __set_current_state(state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + timeout = action(timeout); +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + } while (!x->done && timeout); +- __remove_wait_queue(&x->wait, &wait); ++ __finish_swait(&x->wait, &wait); + if (!x->done) + return timeout; + } +@@ -89,9 +89,9 @@ + { + might_sleep(); + +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + timeout = do_wait_for_common(x, action, timeout, state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + return timeout; + } + +@@ -277,12 +277,12 @@ + if (!READ_ONCE(x->done)) + return 0; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ 
raw_spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = 0; + else + x->done--; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; + } + EXPORT_SYMBOL(try_wait_for_completion); +@@ -311,7 +311,7 @@ + * after it's acquired the lock. + */ + smp_rmb(); +- spin_unlock_wait(&x->wait.lock); ++ raw_spin_unlock_wait(&x->wait.lock); + return true; + } + EXPORT_SYMBOL(completion_done); +diff -Nur linux-4.9.28.orig/kernel/sched/core.c linux-4.9.28/kernel/sched/core.c +--- linux-4.9.28.orig/kernel/sched/core.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/sched/core.c 2017-05-19 03:37:25.194177264 +0200 +@@ -129,7 +129,11 @@ + * Number of tasks to iterate in a single balance run. + * Limited because this is done with IRQs disabled. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + const_debug unsigned int sysctl_sched_nr_migrate = 32; ++#else ++const_debug unsigned int sysctl_sched_nr_migrate = 8; ++#endif + + /* + * period over which we average the RT time consumption, measured +@@ -345,6 +349,7 @@ + + hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rq->hrtick_timer.function = hrtick; ++ rq->hrtick_timer.irqsafe = 1; + } + #else /* CONFIG_SCHED_HRTICK */ + static inline void hrtick_clear(struct rq *rq) +@@ -449,7 +454,7 @@ + head->lastp = &node->next; + } + +-void wake_up_q(struct wake_q_head *head) ++void __wake_up_q(struct wake_q_head *head, bool sleeper) + { + struct wake_q_node *node = head->first; + +@@ -466,7 +471,10 @@ + * wake_up_process() implies a wmb() to pair with the queueing + * in wake_q_add() so as not to miss wakeups. + */ +- wake_up_process(task); ++ if (sleeper) ++ wake_up_lock_sleeper(task); ++ else ++ wake_up_process(task); + put_task_struct(task); + } + } +@@ -502,6 +510,38 @@ + trace_sched_wake_idle_without_ipi(cpu); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++void resched_curr_lazy(struct rq *rq) ++{ ++ struct task_struct *curr = rq->curr; ++ int cpu; ++ ++ if (!sched_feat(PREEMPT_LAZY)) { ++ resched_curr(rq); ++ return; ++ } ++ ++ lockdep_assert_held(&rq->lock); ++ ++ if (test_tsk_need_resched(curr)) ++ return; ++ ++ if (test_tsk_need_resched_lazy(curr)) ++ return; ++ ++ set_tsk_need_resched_lazy(curr); ++ ++ cpu = cpu_of(rq); ++ if (cpu == smp_processor_id()) ++ return; ++ ++ /* NEED_RESCHED_LAZY must be visible before we test polling */ ++ smp_mb(); ++ if (!tsk_is_polling(curr)) ++ smp_send_reschedule(cpu); ++} ++#endif ++ + void resched_cpu(int cpu) + { + struct rq *rq = cpu_rq(cpu); +@@ -525,11 +565,14 @@ + */ + int get_nohz_timer_target(void) + { +- int i, cpu = smp_processor_id(); ++ int i, cpu; + struct sched_domain *sd; + ++ preempt_disable_rt(); ++ cpu = smp_processor_id(); ++ + if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu)) +- return cpu; ++ goto preempt_en_rt; + + rcu_read_lock(); + for_each_domain(cpu, sd) { +@@ -548,6 +591,8 @@ + cpu = housekeeping_any_cpu(); + unlock: + rcu_read_unlock(); ++preempt_en_rt: ++ preempt_enable_rt(); + return cpu; + } + /* +@@ -1100,6 +1145,11 @@ + + lockdep_assert_held(&p->pi_lock); + ++ if (__migrate_disabled(p)) { ++ cpumask_copy(&p->cpus_allowed, new_mask); ++ return; ++ } ++ + queued = task_on_rq_queued(p); + running = task_current(rq, p); + +@@ -1122,6 +1172,84 @@ + set_curr_task(rq, p); + } + ++static DEFINE_PER_CPU(struct cpumask, sched_cpumasks); ++static DEFINE_MUTEX(sched_down_mutex); ++static cpumask_t sched_down_cpumask; ++ ++void tell_sched_cpu_down_begin(int cpu) ++{ ++ mutex_lock(&sched_down_mutex); ++ 
cpumask_set_cpu(cpu, &sched_down_cpumask); ++ mutex_unlock(&sched_down_mutex); ++} ++ ++void tell_sched_cpu_down_done(int cpu) ++{ ++ mutex_lock(&sched_down_mutex); ++ cpumask_clear_cpu(cpu, &sched_down_cpumask); ++ mutex_unlock(&sched_down_mutex); ++} ++ ++/** ++ * migrate_me - try to move the current task off this cpu ++ * ++ * Used by the pin_current_cpu() code to try to get tasks ++ * to move off the current CPU as it is going down. ++ * It will only move the task if the task isn't pinned to ++ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY) ++ * and the task has to be in a RUNNING state. Otherwise the ++ * movement of the task will wake it up (change its state ++ * to running) when the task did not expect it. ++ * ++ * Returns 1 if it succeeded in moving the current task ++ * 0 otherwise. ++ */ ++int migrate_me(void) ++{ ++ struct task_struct *p = current; ++ struct migration_arg arg; ++ struct cpumask *cpumask; ++ struct cpumask *mask; ++ unsigned int dest_cpu; ++ struct rq_flags rf; ++ struct rq *rq; ++ ++ /* ++ * We can not migrate tasks bounded to a CPU or tasks not ++ * running. The movement of the task will wake it up. ++ */ ++ if (p->flags & PF_NO_SETAFFINITY || p->state) ++ return 0; ++ ++ mutex_lock(&sched_down_mutex); ++ rq = task_rq_lock(p, &rf); ++ ++ cpumask = this_cpu_ptr(&sched_cpumasks); ++ mask = &p->cpus_allowed; ++ ++ cpumask_andnot(cpumask, mask, &sched_down_cpumask); ++ ++ if (!cpumask_weight(cpumask)) { ++ /* It's only on this CPU? */ ++ task_rq_unlock(rq, p, &rf); ++ mutex_unlock(&sched_down_mutex); ++ return 0; ++ } ++ ++ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask); ++ ++ arg.task = p; ++ arg.dest_cpu = dest_cpu; ++ ++ task_rq_unlock(rq, p, &rf); ++ ++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); ++ tlb_migrate_finish(p->mm); ++ mutex_unlock(&sched_down_mutex); ++ ++ return 1; ++} ++ + /* + * Change a given task's CPU affinity. Migrate the thread to a + * proper CPU and schedule it away if the CPU it's executing on +@@ -1179,7 +1307,7 @@ + } + + /* Can the task run on the task's current CPU? If so, we're done */ +- if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) + goto out; + + dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); +@@ -1366,6 +1494,18 @@ + return ret; + } + ++static bool check_task_state(struct task_struct *p, long match_state) ++{ ++ bool match = false; ++ ++ raw_spin_lock_irq(&p->pi_lock); ++ if (p->state == match_state || p->saved_state == match_state) ++ match = true; ++ raw_spin_unlock_irq(&p->pi_lock); ++ ++ return match; ++} ++ + /* + * wait_task_inactive - wait for a thread to unschedule. + * +@@ -1410,7 +1550,7 @@ + * is actually now running somewhere else! 
+ */ + while (task_running(rq, p)) { +- if (match_state && unlikely(p->state != match_state)) ++ if (match_state && !check_task_state(p, match_state)) + return 0; + cpu_relax(); + } +@@ -1425,7 +1565,8 @@ + running = task_running(rq, p); + queued = task_on_rq_queued(p); + ncsw = 0; +- if (!match_state || p->state == match_state) ++ if (!match_state || p->state == match_state || ++ p->saved_state == match_state) + ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ + task_rq_unlock(rq, p, &rf); + +@@ -1680,10 +1821,6 @@ + { + activate_task(rq, p, en_flags); + p->on_rq = TASK_ON_RQ_QUEUED; +- +- /* if a worker is waking up, notify workqueue */ +- if (p->flags & PF_WQ_WORKER) +- wq_worker_waking_up(p, cpu_of(rq)); + } + + /* +@@ -2018,8 +2155,27 @@ + */ + smp_mb__before_spinlock(); + raw_spin_lock_irqsave(&p->pi_lock, flags); +- if (!(p->state & state)) ++ if (!(p->state & state)) { ++ /* ++ * The task might be running due to a spinlock sleeper ++ * wakeup. Check the saved state and set it to running ++ * if the wakeup condition is true. ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) { ++ if (p->saved_state & state) { ++ p->saved_state = TASK_RUNNING; ++ success = 1; ++ } ++ } + goto out; ++ } ++ ++ /* ++ * If this is a regular wakeup, then we can unconditionally ++ * clear the saved state of a "lock sleeper". ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) ++ p->saved_state = TASK_RUNNING; + + trace_sched_waking(p); + +@@ -2102,53 +2258,6 @@ + } + + /** +- * try_to_wake_up_local - try to wake up a local task with rq lock held +- * @p: the thread to be awakened +- * @cookie: context's cookie for pinning +- * +- * Put @p on the run-queue if it's not already there. The caller must +- * ensure that this_rq() is locked, @p is bound to this_rq() and not +- * the current task. +- */ +-static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie) +-{ +- struct rq *rq = task_rq(p); +- +- if (WARN_ON_ONCE(rq != this_rq()) || +- WARN_ON_ONCE(p == current)) +- return; +- +- lockdep_assert_held(&rq->lock); +- +- if (!raw_spin_trylock(&p->pi_lock)) { +- /* +- * This is OK, because current is on_cpu, which avoids it being +- * picked for load-balance and preemption/IRQs are still +- * disabled avoiding further scheduler activity on it and we've +- * not yet picked a replacement task. +- */ +- lockdep_unpin_lock(&rq->lock, cookie); +- raw_spin_unlock(&rq->lock); +- raw_spin_lock(&p->pi_lock); +- raw_spin_lock(&rq->lock); +- lockdep_repin_lock(&rq->lock, cookie); +- } +- +- if (!(p->state & TASK_NORMAL)) +- goto out; +- +- trace_sched_waking(p); +- +- if (!task_on_rq_queued(p)) +- ttwu_activate(rq, p, ENQUEUE_WAKEUP); +- +- ttwu_do_wakeup(rq, p, 0, cookie); +- ttwu_stat(p, smp_processor_id(), 0); +-out: +- raw_spin_unlock(&p->pi_lock); +-} +- +-/** + * wake_up_process - Wake up a specific process + * @p: The process to be woken up. + * +@@ -2166,6 +2275,18 @@ + } + EXPORT_SYMBOL(wake_up_process); + ++/** ++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" ++ * @p: The process to be woken up. ++ * ++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate ++ * the nature of the wakeup. 
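++ *
++ * (Editor's note, illustrative only and not part of the original patch:
++ * the expected caller is the rtmutex "lock sleeper" slow path, along
++ * the lines of
++ *
++ * wake_up_lock_sleeper(waiter->task);
++ *
++ * so that waking a task blocked on a sleeping spinlock does not
++ * clobber its ->saved_state.)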
++ */ ++int wake_up_lock_sleeper(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER); ++} ++ + int wake_up_state(struct task_struct *p, unsigned int state) + { + return try_to_wake_up(p, state, 0); +@@ -2442,6 +2563,9 @@ + p->on_cpu = 0; + #endif + init_task_preempt_count(p); ++#ifdef CONFIG_HAVE_PREEMPT_LAZY ++ task_thread_info(p)->preempt_lazy_count = 0; ++#endif + #ifdef CONFIG_SMP + plist_node_init(&p->pushable_tasks, MAX_PRIO); + RB_CLEAR_NODE(&p->pushable_dl_tasks); +@@ -2770,21 +2894,16 @@ + finish_arch_post_lock_switch(); + + fire_sched_in_preempt_notifiers(current); ++ /* ++ * We use mmdrop_delayed() here so we don't have to do the ++ * full __mmdrop() when we are the last user. ++ */ + if (mm) +- mmdrop(mm); ++ mmdrop_delayed(mm); + if (unlikely(prev_state == TASK_DEAD)) { + if (prev->sched_class->task_dead) + prev->sched_class->task_dead(prev); + +- /* +- * Remove function-return probe instances associated with this +- * task and put them back on the free list. +- */ +- kprobe_flush_task(prev); +- +- /* Task is done with its stack. */ +- put_task_stack(prev); +- + put_task_struct(prev); + } + +@@ -3252,6 +3371,77 @@ + schedstat_inc(this_rq()->sched_count); + } + ++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) ++ ++void migrate_disable(void) ++{ ++ struct task_struct *p = current; ++ ++ if (in_atomic() || irqs_disabled()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic++; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ if (unlikely(p->migrate_disable_atomic)) { ++ tracing_off(); ++ WARN_ON_ONCE(1); ++ } ++#endif ++ ++ if (p->migrate_disable) { ++ p->migrate_disable++; ++ return; ++ } ++ ++ preempt_disable(); ++ preempt_lazy_disable(); ++ pin_current_cpu(); ++ p->migrate_disable = 1; ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_disable); ++ ++void migrate_enable(void) ++{ ++ struct task_struct *p = current; ++ ++ if (in_atomic() || irqs_disabled()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic--; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ if (unlikely(p->migrate_disable_atomic)) { ++ tracing_off(); ++ WARN_ON_ONCE(1); ++ } ++#endif ++ WARN_ON_ONCE(p->migrate_disable <= 0); ++ ++ if (p->migrate_disable > 1) { ++ p->migrate_disable--; ++ return; ++ } ++ ++ preempt_disable(); ++ /* ++ * Clearing migrate_disable causes tsk_cpus_allowed to ++ * show the tasks original cpu affinity. ++ */ ++ p->migrate_disable = 0; ++ ++ unpin_current_cpu(); ++ preempt_enable(); ++ preempt_lazy_enable(); ++} ++EXPORT_SYMBOL(migrate_enable); ++#endif ++ + /* + * Pick up the highest-prio task: + */ +@@ -3368,19 +3558,6 @@ + } else { + deactivate_task(rq, prev, DEQUEUE_SLEEP); + prev->on_rq = 0; +- +- /* +- * If a worker went to sleep, notify and ask workqueue +- * whether it wants to wake up a task to maintain +- * concurrency. 
+- */
+- if (prev->flags & PF_WQ_WORKER) {
+- struct task_struct *to_wakeup;
+-
+- to_wakeup = wq_worker_sleeping(prev);
+- if (to_wakeup)
+- try_to_wake_up_local(to_wakeup, cookie);
+- }
+ }
+ switch_count = &prev->nvcsw;
+ }
+@@ -3390,6 +3567,7 @@
+
+ next = pick_next_task(rq, prev, cookie);
+ clear_tsk_need_resched(prev);
++ clear_tsk_need_resched_lazy(prev);
+ clear_preempt_need_resched();
+ rq->clock_skip_update = 0;
+
+@@ -3437,9 +3615,20 @@
+
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
+- if (!tsk->state || tsk_is_pi_blocked(tsk))
++ if (!tsk->state)
+ return;
+ /*
++ * If a worker went to sleep, notify and ask workqueue whether
++ * it wants to wake up a task to maintain concurrency.
++ */
++ if (tsk->flags & PF_WQ_WORKER)
++ wq_worker_sleeping(tsk);
++
++
++ if (tsk_is_pi_blocked(tsk))
++ return;
++
++ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+ */
+@@ -3447,6 +3636,12 @@
+ blk_schedule_flush_plug(tsk);
+ }
+
++static void sched_update_worker(struct task_struct *tsk)
++{
++ if (tsk->flags & PF_WQ_WORKER)
++ wq_worker_running(tsk);
++}
++
+ asmlinkage __visible void __sched schedule(void)
+ {
+ struct task_struct *tsk = current;
+@@ -3457,6 +3652,7 @@
+ __schedule(false);
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
++ sched_update_worker(tsk);
+ }
+ EXPORT_SYMBOL(schedule);
+
+@@ -3520,6 +3716,30 @@
+ } while (need_resched());
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++/*
++ * If TIF_NEED_RESCHED is set, we allow being scheduled away since it is
++ * set by an RT task. Otherwise we try to avoid being scheduled out as
++ * long as the preempt_lazy_count counter is > 0.
++ */
++static __always_inline int preemptible_lazy(void)
++{
++ if (test_thread_flag(TIF_NEED_RESCHED))
++ return 1;
++ if (current_thread_info()->preempt_lazy_count)
++ return 0;
++ return 1;
++}
++
++#else
++
++static inline int preemptible_lazy(void)
++{
++ return 1;
++}
++
++#endif
++
+ #ifdef CONFIG_PREEMPT
+ /*
+ * this is the entry point to schedule() from in-kernel preemption
+@@ -3534,7 +3754,8 @@
+ */
+ if (likely(!preemptible()))
+ return;
+-
++ if (!preemptible_lazy())
++ return;
+ preempt_schedule_common();
+ }
+ NOKPROBE_SYMBOL(preempt_schedule);
+@@ -3561,6 +3782,9 @@
+ if (likely(!preemptible()))
+ return;
+
++ if (!preemptible_lazy())
++ return;
++
+ do {
+ /*
+ * Because the function tracer can trace preempt_count_sub()
+@@ -3583,7 +3807,16 @@
+ * an infinite recursion.
+ */
+ prev_ctx = exception_enter();
++ /*
++ * The add/subtract must not be traced by the function
++ * tracer. But we still want to account for the
++ * preempt off latency tracer. Since the _notrace versions
++ * of add/subtract skip the accounting for latency tracer
++ * we must force it manually.
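++ *
++ * (Editor's note: start_critical_timings() and stop_critical_timings()
++ * are the existing irqsoff/preemptoff tracer hooks; bracketing the
++ * __schedule(true) call below with them is what forces that
++ * accounting.)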
++ */
++ start_critical_timings();
+ __schedule(true);
++ stop_critical_timings();
+ exception_exit(prev_ctx);
+
+ preempt_latency_stop(1);
+@@ -3629,10 +3862,25 @@
+
+ #ifdef CONFIG_RT_MUTEXES
+
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++ if (pi_task)
++ prio = min(prio, pi_task->prio);
++
++ return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++ return __rt_effective_prio(pi_task, prio);
++}
++
+ /*
+ * rt_mutex_setprio - set the current priority of a task
+- * @p: task
+- * @prio: prio value (kernel-internal form)
++ * @p: task to boost
++ * @pi_task: donor task
+ *
+ * This function changes the 'effective' priority of a task. It does
+ * not touch ->normal_prio like __setscheduler().
+@@ -3640,16 +3888,40 @@
+ * Used by the rt_mutex code to implement priority inheritance
+ * logic. Call site only calls if the priority of the task changed.
+ */
+-void rt_mutex_setprio(struct task_struct *p, int prio)
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
+ {
+- int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
++ int prio, oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
+ const struct sched_class *prev_class;
+ struct rq_flags rf;
+ struct rq *rq;
+
+- BUG_ON(prio > MAX_PRIO);
++ /* XXX used to be waiter->prio, not waiter->task->prio */
++ prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++ /*
++ * If nothing changed, bail early.
++ */
++ if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
++ return;
+
+ rq = __task_rq_lock(p, &rf);
++ /*
++ * Set under pi_lock && rq->lock, such that the value can be used under
++ * either lock.
++ *
++ * Note that there is loads of tricky to make this pointer cache work
++ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++ * ensure a task is de-boosted (pi_task is set to NULL) before the
++ * task is allowed to run again (and can exit). This ensures the pointer
++ * points to a blocked task -- which guarantees the task is present.
++ */
++ p->pi_top_task = pi_task;
++
++ /*
++ * For FIFO/RR we only need to set prio, if that matches we're done.
++ */
++ if (prio == p->prio && !dl_prio(prio))
++ goto out_unlock;
+
+ /*
+ * Idle task boosting is a nono in general. There is one
+@@ -3669,7 +3941,7 @@
+ goto out_unlock;
+ }
+
+- trace_sched_pi_setprio(p, prio);
++ trace_sched_pi_setprio(p, pi_task);
+ oldprio = p->prio;
+
+ if (oldprio == prio)
+@@ -3693,7 +3965,6 @@
+ * running task
+ */
+ if (dl_prio(prio)) {
+- struct task_struct *pi_task = rt_mutex_get_top_task(p);
+ if (!dl_prio(p->normal_prio) ||
+ (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
+ p->dl.dl_boosted = 1;
+@@ -3730,6 +4001,11 @@
+ balance_callback(rq);
+ preempt_enable();
+ }
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ return prio;
++}
+ #endif
+
+ void set_user_nice(struct task_struct *p, long nice)
+@@ -3974,10 +4250,9 @@
+ * Keep a potential priority boosting if called from
+ * sched_setscheduler().
+ */
++ p->prio = normal_prio(p);
+ if (keep_boost)
+- p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+- else
+- p->prio = normal_prio(p);
++ p->prio = rt_effective_prio(p, p->prio);
+
+ if (dl_prio(p->prio))
+ p->sched_class = &dl_sched_class;
+@@ -4264,7 +4539,7 @@
+ * the runqueue. This will be done when the task deboost
+ * itself.
+ */ +- new_effective_prio = rt_mutex_get_effective_prio(p, newprio); ++ new_effective_prio = rt_effective_prio(p, newprio); + if (new_effective_prio == oldprio) + queue_flags &= ~DEQUEUE_MOVE; + } +@@ -4939,6 +5214,7 @@ + } + EXPORT_SYMBOL(__cond_resched_lock); + ++#ifndef CONFIG_PREEMPT_RT_FULL + int __sched __cond_resched_softirq(void) + { + BUG_ON(!in_softirq()); +@@ -4952,6 +5228,7 @@ + return 0; + } + EXPORT_SYMBOL(__cond_resched_softirq); ++#endif + + /** + * yield - yield the current processor to other threads. +@@ -5315,7 +5592,9 @@ + + /* Set the preempt count _outside_ the spinlocks! */ + init_idle_preempt_count(idle, cpu); +- ++#ifdef CONFIG_HAVE_PREEMPT_LAZY ++ task_thread_info(idle)->preempt_lazy_count = 0; ++#endif + /* + * The idle tasks have their own, simple scheduling class: + */ +@@ -5458,6 +5737,8 @@ + #endif /* CONFIG_NUMA_BALANCING */ + + #ifdef CONFIG_HOTPLUG_CPU ++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); ++ + /* + * Ensures that the idle task is using init_mm right before its cpu goes + * offline. +@@ -5472,7 +5753,12 @@ + switch_mm_irqs_off(mm, &init_mm, current); + finish_arch_post_lock_switch(); + } +- mmdrop(mm); ++ /* ++ * Defer the cleanup to an alive cpu. On RT we can neither ++ * call mmdrop() nor mmdrop_delayed() from here. ++ */ ++ per_cpu(idle_last_mm, smp_processor_id()) = mm; ++ + } + + /* +@@ -7418,6 +7704,10 @@ + update_max_interval(); + nohz_balance_exit_idle(cpu); + hrtick_clear(rq); ++ if (per_cpu(idle_last_mm, cpu)) { ++ mmdrop_delayed(per_cpu(idle_last_mm, cpu)); ++ per_cpu(idle_last_mm, cpu) = NULL; ++ } + return 0; + } + #endif +@@ -7698,7 +7988,7 @@ + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + static inline int preempt_count_equals(int preempt_offset) + { +- int nested = preempt_count() + rcu_preempt_depth(); ++ int nested = preempt_count() + sched_rcu_preempt_depth(); + + return (nested == preempt_offset); + } +diff -Nur linux-4.9.28.orig/kernel/sched/deadline.c linux-4.9.28/kernel/sched/deadline.c +--- linux-4.9.28.orig/kernel/sched/deadline.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/sched/deadline.c 2017-05-19 03:37:25.194177264 +0200 +@@ -687,6 +687,7 @@ + + hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + timer->function = dl_task_timer; ++ timer->irqsafe = 1; + } + + static +diff -Nur linux-4.9.28.orig/kernel/sched/debug.c linux-4.9.28/kernel/sched/debug.c +--- linux-4.9.28.orig/kernel/sched/debug.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/sched/debug.c 2017-05-19 03:37:25.194177264 +0200 +@@ -558,6 +558,9 @@ + P(rt_throttled); + PN(rt_time); + PN(rt_runtime); ++#ifdef CONFIG_SMP ++ P(rt_nr_migratory); ++#endif + + #undef PN + #undef P +@@ -953,6 +956,10 @@ + #endif + P(policy); + P(prio); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ P(migrate_disable); ++#endif ++ P(nr_cpus_allowed); + #undef PN_SCHEDSTAT + #undef PN + #undef __PN +diff -Nur linux-4.9.28.orig/kernel/sched/fair.c linux-4.9.28/kernel/sched/fair.c +--- linux-4.9.28.orig/kernel/sched/fair.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/sched/fair.c 2017-05-19 03:37:25.194177264 +0200 +@@ -3518,7 +3518,7 @@ + ideal_runtime = sched_slice(cfs_rq, curr); + delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; + if (delta_exec > ideal_runtime) { +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + /* + * The current task ran long enough, ensure it doesn't get + * re-elected due to buddy favours. 
+@@ -3542,7 +3542,7 @@ + return; + + if (delta > ideal_runtime) +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + } + + static void +@@ -3684,7 +3684,7 @@ + * validating it and just reschedule. + */ + if (queued) { +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + return; + } + /* +@@ -3866,7 +3866,7 @@ + * hierarchy can be throttled + */ + if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + } + + static __always_inline +@@ -4494,7 +4494,7 @@ + + if (delta < 0) { + if (rq->curr == p) +- resched_curr(rq); ++ resched_curr_lazy(rq); + return; + } + hrtick_start(rq, delta); +@@ -5905,7 +5905,7 @@ + return; + + preempt: +- resched_curr(rq); ++ resched_curr_lazy(rq); + /* + * Only set the backward buddy when the current task is still + * on the rq. This can happen when a wakeup gets interleaved +@@ -8631,7 +8631,7 @@ + * 'current' within the tree based on its new key value. + */ + swap(curr->vruntime, se->vruntime); +- resched_curr(rq); ++ resched_curr_lazy(rq); + } + + se->vruntime -= cfs_rq->min_vruntime; +@@ -8655,7 +8655,7 @@ + */ + if (rq->curr == p) { + if (p->prio > oldprio) +- resched_curr(rq); ++ resched_curr_lazy(rq); + } else + check_preempt_curr(rq, p, 0); + } +diff -Nur linux-4.9.28.orig/kernel/sched/features.h linux-4.9.28/kernel/sched/features.h +--- linux-4.9.28.orig/kernel/sched/features.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/sched/features.h 2017-05-19 03:37:25.194177264 +0200 +@@ -45,11 +45,19 @@ + */ + SCHED_FEAT(NONTASK_CAPACITY, true) + ++#ifdef CONFIG_PREEMPT_RT_FULL ++SCHED_FEAT(TTWU_QUEUE, false) ++# ifdef CONFIG_PREEMPT_LAZY ++SCHED_FEAT(PREEMPT_LAZY, true) ++# endif ++#else ++ + /* + * Queue remote wakeups on the target CPU and process them + * using the scheduler IPI. Reduces rq->lock contention/bounces. 
+ */ + SCHED_FEAT(TTWU_QUEUE, true) ++#endif + + #ifdef HAVE_RT_PUSH_IPI + /* +diff -Nur linux-4.9.28.orig/kernel/sched/Makefile linux-4.9.28/kernel/sched/Makefile +--- linux-4.9.28.orig/kernel/sched/Makefile 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/sched/Makefile 2017-05-19 03:37:25.194177264 +0200 +@@ -17,7 +17,7 @@ + + obj-y += core.o loadavg.o clock.o cputime.o + obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o +-obj-y += wait.o swait.o completion.o idle.o ++obj-y += wait.o swait.o swork.o completion.o idle.o + obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o + obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o + obj-$(CONFIG_SCHEDSTATS) += stats.o +diff -Nur linux-4.9.28.orig/kernel/sched/rt.c linux-4.9.28/kernel/sched/rt.c +--- linux-4.9.28.orig/kernel/sched/rt.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/sched/rt.c 2017-05-19 03:37:25.194177264 +0200 +@@ -47,6 +47,7 @@ + + hrtimer_init(&rt_b->rt_period_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ rt_b->rt_period_timer.irqsafe = 1; + rt_b->rt_period_timer.function = sched_rt_period_timer; + } + +@@ -101,6 +102,7 @@ + rt_rq->push_cpu = nr_cpu_ids; + raw_spin_lock_init(&rt_rq->push_lock); + init_irq_work(&rt_rq->push_work, push_irq_work_func); ++ rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ; + #endif + #endif /* CONFIG_SMP */ + /* We start is dequeued state, because no RT tasks are queued */ +diff -Nur linux-4.9.28.orig/kernel/sched/sched.h linux-4.9.28/kernel/sched/sched.h +--- linux-4.9.28.orig/kernel/sched/sched.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/sched/sched.h 2017-05-19 03:37:25.194177264 +0200 +@@ -1163,6 +1163,7 @@ + #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ + #define WF_FORK 0x02 /* child wakeup after fork */ + #define WF_MIGRATED 0x4 /* internal use, task got migrated */ ++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ + + /* + * To aid in avoiding the subversion of "niceness" due to uneven distribution +@@ -1346,6 +1347,15 @@ + extern void resched_curr(struct rq *rq); + extern void resched_cpu(int cpu); + ++#ifdef CONFIG_PREEMPT_LAZY ++extern void resched_curr_lazy(struct rq *rq); ++#else ++static inline void resched_curr_lazy(struct rq *rq) ++{ ++ resched_curr(rq); ++} ++#endif ++ + extern struct rt_bandwidth def_rt_bandwidth; + extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); + +diff -Nur linux-4.9.28.orig/kernel/sched/swait.c linux-4.9.28/kernel/sched/swait.c +--- linux-4.9.28.orig/kernel/sched/swait.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/sched/swait.c 2017-05-19 03:37:25.198177419 +0200 +@@ -1,5 +1,6 @@ + #include <linux/sched.h> + #include <linux/swait.h> ++#include <linux/suspend.h> + + void __init_swait_queue_head(struct swait_queue_head *q, const char *name, + struct lock_class_key *key) +@@ -29,6 +30,25 @@ + } + EXPORT_SYMBOL(swake_up_locked); + ++void swake_up_all_locked(struct swait_queue_head *q) ++{ ++ struct swait_queue *curr; ++ int wakes = 0; ++ ++ while (!list_empty(&q->task_list)) { ++ ++ curr = list_first_entry(&q->task_list, typeof(*curr), ++ task_list); ++ wake_up_process(curr->task); ++ list_del_init(&curr->task_list); ++ wakes++; ++ } ++ if (pm_in_action) ++ return; ++ WARN(wakes > 2, "complete_all() with %d waiters\n", wakes); ++} ++EXPORT_SYMBOL(swake_up_all_locked); ++ + void swake_up(struct swait_queue_head *q) + { + unsigned long flags; +@@ -54,6 +74,7 @@ + if (!swait_active(q)) + return; + ++ WARN_ON(irqs_disabled()); + 
raw_spin_lock_irq(&q->lock);
+ list_splice_init(&q->task_list, &tmp);
+ while (!list_empty(&tmp)) {
+diff -Nur linux-4.9.28.orig/kernel/sched/swork.c linux-4.9.28/kernel/sched/swork.c
+--- linux-4.9.28.orig/kernel/sched/swork.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.9.28/kernel/sched/swork.c 2017-05-19 03:37:25.198177419 +0200
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
++ *
++ * Provides a framework for enqueuing callbacks from IRQ context in a
++ * PREEMPT_RT_FULL-safe way. The callbacks are executed in kthread context.
++ */
++
++#include <linux/swait.h>
++#include <linux/swork.h>
++#include <linux/kthread.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/export.h>
++
++#define SWORK_EVENT_PENDING (1 << 0)
++
++static DEFINE_MUTEX(worker_mutex);
++static struct sworker *glob_worker;
++
++struct sworker {
++ struct list_head events;
++ struct swait_queue_head wq;
++
++ raw_spinlock_t lock;
++
++ struct task_struct *task;
++ int refs;
++};
++
++static bool swork_readable(struct sworker *worker)
++{
++ bool r;
++
++ if (kthread_should_stop())
++ return true;
++
++ raw_spin_lock_irq(&worker->lock);
++ r = !list_empty(&worker->events);
++ raw_spin_unlock_irq(&worker->lock);
++
++ return r;
++}
++
++static int swork_kthread(void *arg)
++{
++ struct sworker *worker = arg;
++
++ for (;;) {
++ swait_event_interruptible(worker->wq,
++ swork_readable(worker));
++ if (kthread_should_stop())
++ break;
++
++ raw_spin_lock_irq(&worker->lock);
++ while (!list_empty(&worker->events)) {
++ struct swork_event *sev;
++
++ sev = list_first_entry(&worker->events,
++ struct swork_event, item);
++ list_del(&sev->item);
++ raw_spin_unlock_irq(&worker->lock);
++
++ WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
++ &sev->flags));
++ sev->func(sev);
++ raw_spin_lock_irq(&worker->lock);
++ }
++ raw_spin_unlock_irq(&worker->lock);
++ }
++ return 0;
++}
++
++static struct sworker *swork_create(void)
++{
++ struct sworker *worker;
++
++ worker = kzalloc(sizeof(*worker), GFP_KERNEL);
++ if (!worker)
++ return ERR_PTR(-ENOMEM);
++
++ INIT_LIST_HEAD(&worker->events);
++ raw_spin_lock_init(&worker->lock);
++ init_swait_queue_head(&worker->wq);
++
++ worker->task = kthread_run(swork_kthread, worker, "kswork");
++ if (IS_ERR(worker->task)) {
++ kfree(worker);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ return worker;
++}
++
++static void swork_destroy(struct sworker *worker)
++{
++ kthread_stop(worker->task);
++
++ WARN_ON(!list_empty(&worker->events));
++ kfree(worker);
++}
++
++/**
++ * swork_queue - queue swork
++ *
++ * Returns %false if @sev was already on a queue, %true otherwise.
++ *
++ * The work is queued and processed on a random CPU.
++ */
++bool swork_queue(struct swork_event *sev)
++{
++ unsigned long flags;
++
++ if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
++ return false;
++
++ raw_spin_lock_irqsave(&glob_worker->lock, flags);
++ list_add_tail(&sev->item, &glob_worker->events);
++ raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
++
++ swake_up(&glob_worker->wq);
++ return true;
++}
++EXPORT_SYMBOL_GPL(swork_queue);
++
++/**
++ * swork_get - get an instance of the sworker
++ *
++ * Returns a negative error code if the initialization of the worker did
++ * not work, %0 otherwise.
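++ *
++ * (Editor's illustration, not part of the original file, mirroring the
++ * clock_was_set_delayed() user added elsewhere in this patch; the
++ * callback name my_callback is hypothetical:
++ *
++ * static struct swork_event ev;
++ *
++ * INIT_SWORK(&ev, my_callback);
++ * WARN_ON(swork_get());
++ * ...
++ * swork_queue(&ev); (safe from hard irq context)
++ *
++ * swork_put() releases the worker again.)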
++ * ++ */ ++int swork_get(void) ++{ ++ struct sworker *worker; ++ ++ mutex_lock(&worker_mutex); ++ if (!glob_worker) { ++ worker = swork_create(); ++ if (IS_ERR(worker)) { ++ mutex_unlock(&worker_mutex); ++ return -ENOMEM; ++ } ++ ++ glob_worker = worker; ++ } ++ ++ glob_worker->refs++; ++ mutex_unlock(&worker_mutex); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(swork_get); ++ ++/** ++ * swork_put - puts an instance of the sworker ++ * ++ * Will destroy the sworker thread. This function must not be called until all ++ * queued events have been completed. ++ */ ++void swork_put(void) ++{ ++ mutex_lock(&worker_mutex); ++ ++ glob_worker->refs--; ++ if (glob_worker->refs > 0) ++ goto out; ++ ++ swork_destroy(glob_worker); ++ glob_worker = NULL; ++out: ++ mutex_unlock(&worker_mutex); ++} ++EXPORT_SYMBOL_GPL(swork_put); +diff -Nur linux-4.9.28.orig/kernel/signal.c linux-4.9.28/kernel/signal.c +--- linux-4.9.28.orig/kernel/signal.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/signal.c 2017-05-19 03:37:25.198177419 +0200 +@@ -14,6 +14,7 @@ + #include <linux/export.h> + #include <linux/init.h> + #include <linux/sched.h> ++#include <linux/sched/rt.h> + #include <linux/fs.h> + #include <linux/tty.h> + #include <linux/binfmts.h> +@@ -352,13 +353,30 @@ + return false; + } + ++static inline struct sigqueue *get_task_cache(struct task_struct *t) ++{ ++ struct sigqueue *q = t->sigqueue_cache; ++ ++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) ++ return NULL; ++ return q; ++} ++ ++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) ++{ ++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) ++ return 0; ++ return 1; ++} ++ + /* + * allocate a new signal queue record + * - this may be called without locks if and only if t == current, otherwise an + * appropriate lock must be held to stop the target task from exiting + */ + static struct sigqueue * +-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) ++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit, int fromslab) + { + struct sigqueue *q = NULL; + struct user_struct *user; +@@ -375,7 +393,10 @@ + if (override_rlimit || + atomic_read(&user->sigpending) <= + task_rlimit(t, RLIMIT_SIGPENDING)) { +- q = kmem_cache_alloc(sigqueue_cachep, flags); ++ if (!fromslab) ++ q = get_task_cache(t); ++ if (!q) ++ q = kmem_cache_alloc(sigqueue_cachep, flags); + } else { + print_dropped_signal(sig); + } +@@ -392,6 +413,13 @@ + return q; + } + ++static struct sigqueue * ++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit) ++{ ++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); ++} ++ + static void __sigqueue_free(struct sigqueue *q) + { + if (q->flags & SIGQUEUE_PREALLOC) +@@ -401,6 +429,21 @@ + kmem_cache_free(sigqueue_cachep, q); + } + ++static void sigqueue_free_current(struct sigqueue *q) ++{ ++ struct user_struct *up; ++ ++ if (q->flags & SIGQUEUE_PREALLOC) ++ return; ++ ++ up = q->user; ++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { ++ atomic_dec(&up->sigpending); ++ free_uid(up); ++ } else ++ __sigqueue_free(q); ++} ++ + void flush_sigqueue(struct sigpending *queue) + { + struct sigqueue *q; +@@ -414,6 +457,21 @@ + } + + /* ++ * Called from __exit_signal. 
Flush tsk->pending and ++ * tsk->sigqueue_cache ++ */ ++void flush_task_sigqueue(struct task_struct *tsk) ++{ ++ struct sigqueue *q; ++ ++ flush_sigqueue(&tsk->pending); ++ ++ q = get_task_cache(tsk); ++ if (q) ++ kmem_cache_free(sigqueue_cachep, q); ++} ++ ++/* + * Flush all pending signals for this kthread. + */ + void flush_signals(struct task_struct *t) +@@ -525,7 +583,7 @@ + still_pending: + list_del_init(&first->list); + copy_siginfo(info, &first->info); +- __sigqueue_free(first); ++ sigqueue_free_current(first); + } else { + /* + * Ok, it wasn't in the queue. This must be +@@ -560,6 +618,8 @@ + { + int signr; + ++ WARN_ON_ONCE(tsk != current); ++ + /* We only dequeue private signals from ourselves, we don't let + * signalfd steal them + */ +@@ -1156,8 +1216,8 @@ + * We don't want to have recursive SIGSEGV's etc, for example, + * that is why we also clear SIGNAL_UNKILLABLE. + */ +-int +-force_sig_info(int sig, struct siginfo *info, struct task_struct *t) ++static int ++do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + { + unsigned long int flags; + int ret, blocked, ignored; +@@ -1182,6 +1242,39 @@ + return ret; + } + ++int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) ++{ ++/* ++ * On some archs, PREEMPT_RT has to delay sending a signal from a trap ++ * since it can not enable preemption, and the signal code's spin_locks ++ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will ++ * send the signal on exit of the trap. ++ */ ++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND ++ if (in_atomic()) { ++ if (WARN_ON_ONCE(t != current)) ++ return 0; ++ if (WARN_ON_ONCE(t->forced_info.si_signo)) ++ return 0; ++ ++ if (is_si_special(info)) { ++ WARN_ON_ONCE(info != SEND_SIG_PRIV); ++ t->forced_info.si_signo = sig; ++ t->forced_info.si_errno = 0; ++ t->forced_info.si_code = SI_KERNEL; ++ t->forced_info.si_pid = 0; ++ t->forced_info.si_uid = 0; ++ } else { ++ t->forced_info = *info; ++ } ++ ++ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); ++ return 0; ++ } ++#endif ++ return do_force_sig_info(sig, info, t); ++} ++ + /* + * Nuke all other threads in the group. + */ +@@ -1216,12 +1309,12 @@ + * Disable interrupts early to avoid deadlocks. + * See rcu_read_unlock() comment header for details. + */ +- local_irq_save(*flags); ++ local_irq_save_nort(*flags); + rcu_read_lock(); + sighand = rcu_dereference(tsk->sighand); + if (unlikely(sighand == NULL)) { + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_irq_restore_nort(*flags); + break; + } + /* +@@ -1242,7 +1335,7 @@ + } + spin_unlock(&sighand->siglock); + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_irq_restore_nort(*flags); + } + + return sighand; +@@ -1485,7 +1578,8 @@ + */ + struct sigqueue *sigqueue_alloc(void) + { +- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); ++ /* Preallocated sigqueue objects always from the slabcache ! */ ++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); + + if (q) + q->flags |= SIGQUEUE_PREALLOC; +@@ -1846,15 +1940,7 @@ + if (gstop_done && ptrace_reparented(current)) + do_notify_parent_cldstop(current, false, why); + +- /* +- * Don't want to allow preemption here, because +- * sys_ptrace() needs this task to be inactive. +- * +- * XXX: implement read_unlock_no_resched(). 
+- */
+- preempt_disable();
+ read_unlock(&tasklist_lock);
+- preempt_enable_no_resched();
+ freezable_schedule();
+ } else {
+ /*
+diff -Nur linux-4.9.28.orig/kernel/softirq.c linux-4.9.28/kernel/softirq.c
+--- linux-4.9.28.orig/kernel/softirq.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/kernel/softirq.c 2017-05-19 03:37:25.198177419 +0200
+@@ -21,10 +21,12 @@
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
+ #include <linux/rcupdate.h>
++#include <linux/delay.h>
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/smpboot.h>
+ #include <linux/tick.h>
++#include <linux/locallock.h>
+ #include <linux/irq.h>
+
+ #define CREATE_TRACE_POINTS
+@@ -56,12 +58,108 @@
+ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
+
+ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
++#ifdef CONFIG_PREEMPT_RT_FULL
++#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
++DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
++#endif
+
+ const char * const softirq_to_name[NR_SOFTIRQS] = {
+ "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+
++#ifdef CONFIG_NO_HZ_COMMON
++# ifdef CONFIG_PREEMPT_RT_FULL
++
++struct softirq_runner {
++ struct task_struct *runner[NR_SOFTIRQS];
++};
++
++static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
++
++static inline void softirq_set_runner(unsigned int sirq)
++{
++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
++
++ sr->runner[sirq] = current;
++}
++
++static inline void softirq_clr_runner(unsigned int sirq)
++{
++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
++
++ sr->runner[sirq] = NULL;
++}
++
++/*
++ * On preempt-rt a softirq running context might be blocked on a
++ * lock. There might be no other runnable task on this CPU because the
++ * lock owner runs on some other CPU. So we have to go into idle with
++ * the pending bit set. Therefore we need to check this, otherwise we
++ * warn about false positives, which confuses users and defeats the
++ * whole purpose of this test.
++ *
++ * This code is called with interrupts disabled.
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
++ u32 warnpending;
++ int i;
++
++ if (rate_limit >= 10)
++ return;
++
++ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
++ for (i = 0; i < NR_SOFTIRQS; i++) {
++ struct task_struct *tsk = sr->runner[i];
++
++ /*
++ * The wakeup code in rtmutex.c wakes up the task
++ * _before_ it sets pi_blocked_on to NULL under
++ * tsk->pi_lock. So we need to check for both: state
++ * and pi_blocked_on.
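++ *
++ * (Editor's note: a runner blocked on a sleeping lock is recognised
++ * by tsk->pi_blocked_on != NULL, a merely woken or preempted one by
++ * tsk->state == TASK_RUNNING; both cases are benign, which is why
++ * the check below clears the corresponding pending bits.)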
++ */ ++ if (tsk) { ++ raw_spin_lock(&tsk->pi_lock); ++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { ++ /* Clear all bits pending in that task */ ++ warnpending &= ~(tsk->softirqs_raised); ++ warnpending &= ~(1 << i); ++ } ++ raw_spin_unlock(&tsk->pi_lock); ++ } ++ } ++ ++ if (warnpending) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ warnpending); ++ rate_limit++; ++ } ++} ++# else ++/* ++ * On !PREEMPT_RT we just printk rate limited: ++ */ ++void softirq_check_pending_idle(void) ++{ ++ static int rate_limit; ++ ++ if (rate_limit < 10 && ++ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ local_softirq_pending()); ++ rate_limit++; ++ } ++} ++# endif ++ ++#else /* !CONFIG_NO_HZ_COMMON */ ++static inline void softirq_set_runner(unsigned int sirq) { } ++static inline void softirq_clr_runner(unsigned int sirq) { } ++#endif ++ + /* + * we cannot loop indefinitely here to avoid userspace starvation, + * but we also don't want to introduce a worst case 1/HZ latency +@@ -77,6 +175,38 @@ + wake_up_process(tsk); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void wakeup_timer_softirqd(void) ++{ ++ /* Interrupts are disabled: no need to stop preemption */ ++ struct task_struct *tsk = __this_cpu_read(ktimer_softirqd); ++ ++ if (tsk && tsk->state != TASK_RUNNING) ++ wake_up_process(tsk); ++} ++#endif ++ ++static void handle_softirq(unsigned int vec_nr) ++{ ++ struct softirq_action *h = softirq_vec + vec_nr; ++ int prev_count; ++ ++ prev_count = preempt_count(); ++ ++ kstat_incr_softirqs_this_cpu(vec_nr); ++ ++ trace_softirq_entry(vec_nr); ++ h->action(h); ++ trace_softirq_exit(vec_nr); ++ if (unlikely(prev_count != preempt_count())) { ++ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", ++ vec_nr, softirq_to_name[vec_nr], h->action, ++ prev_count, preempt_count()); ++ preempt_count_set(prev_count); ++ } ++} ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * If ksoftirqd is scheduled, we do not want to process pending softirqs + * right now. Let ksoftirqd handle this at its own rate, to get fairness. 
+@@ -88,6 +218,47 @@ + return tsk && (tsk->state == TASK_RUNNING); + } + ++static inline int ksoftirqd_softirq_pending(void) ++{ ++ return local_softirq_pending(); ++} ++ ++static void handle_pending_softirqs(u32 pending) ++{ ++ struct softirq_action *h = softirq_vec; ++ int softirq_bit; ++ ++ local_irq_enable(); ++ ++ h = softirq_vec; ++ ++ while ((softirq_bit = ffs(pending))) { ++ unsigned int vec_nr; ++ ++ h += softirq_bit - 1; ++ vec_nr = h - softirq_vec; ++ handle_softirq(vec_nr); ++ ++ h++; ++ pending >>= softirq_bit; ++ } ++ ++ rcu_bh_qs(); ++ local_irq_disable(); ++} ++ ++static void run_ksoftirqd(unsigned int cpu) ++{ ++ local_irq_disable(); ++ if (ksoftirqd_softirq_pending()) { ++ __do_softirq(); ++ local_irq_enable(); ++ cond_resched_rcu_qs(); ++ return; ++ } ++ local_irq_enable(); ++} ++ + /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving +@@ -243,10 +414,8 @@ + unsigned long end = jiffies + MAX_SOFTIRQ_TIME; + unsigned long old_flags = current->flags; + int max_restart = MAX_SOFTIRQ_RESTART; +- struct softirq_action *h; + bool in_hardirq; + __u32 pending; +- int softirq_bit; + + /* + * Mask out PF_MEMALLOC s current task context is borrowed for the +@@ -265,36 +434,7 @@ + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + +- local_irq_enable(); +- +- h = softirq_vec; +- +- while ((softirq_bit = ffs(pending))) { +- unsigned int vec_nr; +- int prev_count; +- +- h += softirq_bit - 1; +- +- vec_nr = h - softirq_vec; +- prev_count = preempt_count(); +- +- kstat_incr_softirqs_this_cpu(vec_nr); +- +- trace_softirq_entry(vec_nr); +- h->action(h); +- trace_softirq_exit(vec_nr); +- if (unlikely(prev_count != preempt_count())) { +- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", +- vec_nr, softirq_to_name[vec_nr], h->action, +- prev_count, preempt_count()); +- preempt_count_set(prev_count); +- } +- h++; +- pending >>= softirq_bit; +- } +- +- rcu_bh_qs(); +- local_irq_disable(); ++ handle_pending_softirqs(pending); + + pending = local_softirq_pending(); + if (pending) { +@@ -331,6 +471,309 @@ + } + + /* ++ * This function must run with irqs disabled! ++ */ ++void raise_softirq_irqoff(unsigned int nr) ++{ ++ __raise_softirq_irqoff(nr); ++ ++ /* ++ * If we're in an interrupt or softirq, we're done ++ * (this also catches softirq-disabled code). We will ++ * actually run the softirq once we return from ++ * the irq or softirq. ++ * ++ * Otherwise we wake up ksoftirqd to make sure we ++ * schedule the softirq soon. 
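++ *
++ * (Editor's illustration, not part of the original comment: a typical
++ * caller disables interrupts itself,
++ *
++ * local_irq_save(flags);
++ * raise_softirq_irqoff(NET_RX_SOFTIRQ);
++ * local_irq_restore(flags);
++ *
++ * which is exactly the pattern the generic raise_softirq() wraps.)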
++ */ ++ if (!in_interrupt()) ++ wakeup_softirqd(); ++} ++ ++void __raise_softirq_irqoff(unsigned int nr) ++{ ++ trace_softirq_raise(nr); ++ or_softirq_pending(1UL << nr); ++} ++ ++static inline void local_bh_disable_nort(void) { local_bh_disable(); } ++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } ++static void ksoftirqd_set_sched_params(unsigned int cpu) { } ++ ++#else /* !PREEMPT_RT_FULL */ ++ ++/* ++ * On RT we serialize softirq execution with a cpu local lock per softirq ++ */ ++static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); ++ ++void __init softirq_early_init(void) ++{ ++ int i; ++ ++ for (i = 0; i < NR_SOFTIRQS; i++) ++ local_irq_lock_init(local_softirq_locks[i]); ++} ++ ++static void lock_softirq(int which) ++{ ++ local_lock(local_softirq_locks[which]); ++} ++ ++static void unlock_softirq(int which) ++{ ++ local_unlock(local_softirq_locks[which]); ++} ++ ++static void do_single_softirq(int which) ++{ ++ unsigned long old_flags = current->flags; ++ ++ current->flags &= ~PF_MEMALLOC; ++ vtime_account_irq_enter(current); ++ current->flags |= PF_IN_SOFTIRQ; ++ lockdep_softirq_enter(); ++ local_irq_enable(); ++ handle_softirq(which); ++ local_irq_disable(); ++ lockdep_softirq_exit(); ++ current->flags &= ~PF_IN_SOFTIRQ; ++ vtime_account_irq_enter(current); ++ tsk_restore_flags(current, old_flags, PF_MEMALLOC); ++} ++ ++/* ++ * Called with interrupts disabled. Process softirqs which were raised ++ * in current context (or on behalf of ksoftirqd). ++ */ ++static void do_current_softirqs(void) ++{ ++ while (current->softirqs_raised) { ++ int i = __ffs(current->softirqs_raised); ++ unsigned int pending, mask = (1U << i); ++ ++ current->softirqs_raised &= ~mask; ++ local_irq_enable(); ++ ++ /* ++ * If the lock is contended, we boost the owner to ++ * process the softirq or leave the critical section ++ * now. ++ */ ++ lock_softirq(i); ++ local_irq_disable(); ++ softirq_set_runner(i); ++ /* ++ * Check with the local_softirq_pending() bits, ++ * whether we need to process this still or if someone ++ * else took care of it. ++ */ ++ pending = local_softirq_pending(); ++ if (pending & mask) { ++ set_softirq_pending(pending & ~mask); ++ do_single_softirq(i); ++ } ++ softirq_clr_runner(i); ++ WARN_ON(current->softirq_nestcnt != 1); ++ local_irq_enable(); ++ unlock_softirq(i); ++ local_irq_disable(); ++ } ++} ++ ++void __local_bh_disable(void) ++{ ++ if (++current->softirq_nestcnt == 1) ++ migrate_disable(); ++} ++EXPORT_SYMBOL(__local_bh_disable); ++ ++void __local_bh_enable(void) ++{ ++ if (WARN_ON(current->softirq_nestcnt == 0)) ++ return; ++ ++ local_irq_disable(); ++ if (current->softirq_nestcnt == 1 && current->softirqs_raised) ++ do_current_softirqs(); ++ local_irq_enable(); ++ ++ if (--current->softirq_nestcnt == 0) ++ migrate_enable(); ++} ++EXPORT_SYMBOL(__local_bh_enable); ++ ++void _local_bh_enable(void) ++{ ++ if (WARN_ON(current->softirq_nestcnt == 0)) ++ return; ++ if (--current->softirq_nestcnt == 0) ++ migrate_enable(); ++} ++EXPORT_SYMBOL(_local_bh_enable); ++ ++int in_serving_softirq(void) ++{ ++ return current->flags & PF_IN_SOFTIRQ; ++} ++EXPORT_SYMBOL(in_serving_softirq); ++ ++/* Called with preemption disabled */ ++static void run_ksoftirqd(unsigned int cpu) ++{ ++ local_irq_disable(); ++ current->softirq_nestcnt++; ++ ++ do_current_softirqs(); ++ current->softirq_nestcnt--; ++ local_irq_enable(); ++ cond_resched_rcu_qs(); ++} ++ ++/* ++ * Called from netif_rx_ni(). Preemption enabled, but migration ++ * disabled. 
So the cpu can't go away under us.
++ */
++void thread_do_softirq(void)
++{
++ if (!in_serving_softirq() && current->softirqs_raised) {
++ current->softirq_nestcnt++;
++ do_current_softirqs();
++ current->softirq_nestcnt--;
++ }
++}
++
++static void do_raise_softirq_irqoff(unsigned int nr)
++{
++ unsigned int mask;
++
++ mask = 1UL << nr;
++
++ trace_softirq_raise(nr);
++ or_softirq_pending(mask);
++
++ /*
++ * If we are not in a hard interrupt and inside a bh disabled
++ * region, we simply raise the flag on current. local_bh_enable()
++ * will make sure that the softirq is executed. Otherwise we
++ * delegate it to ksoftirqd.
++ */
++ if (!in_irq() && current->softirq_nestcnt)
++ current->softirqs_raised |= mask;
++ else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
++ return;
++
++ if (mask & TIMER_SOFTIRQS)
++ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
++ else
++ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
++}
++
++static void wakeup_proper_softirq(unsigned int nr)
++{
++ if ((1UL << nr) & TIMER_SOFTIRQS)
++ wakeup_timer_softirqd();
++ else
++ wakeup_softirqd();
++}
++
++void __raise_softirq_irqoff(unsigned int nr)
++{
++ do_raise_softirq_irqoff(nr);
++ if (!in_irq() && !current->softirq_nestcnt)
++ wakeup_proper_softirq(nr);
++}
++
++/*
++ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd
++ */
++void __raise_softirq_irqoff_ksoft(unsigned int nr)
++{
++ unsigned int mask;
++
++ if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) ||
++ !__this_cpu_read(ktimer_softirqd)))
++ return;
++ mask = 1UL << nr;
++
++ trace_softirq_raise(nr);
++ or_softirq_pending(mask);
++ if (mask & TIMER_SOFTIRQS)
++ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
++ else
++ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
++ wakeup_proper_softirq(nr);
++}
++
++/*
++ * This function must run with irqs disabled!
++ */
++void raise_softirq_irqoff(unsigned int nr)
++{
++ do_raise_softirq_irqoff(nr);
++
++ /*
++ * If we're in a hard interrupt we let the irq return code deal
++ * with the wakeup of ksoftirqd.
++ */
++ if (in_irq())
++ return;
++ /*
++ * If we are in thread context but outside of a bh disabled
++ * region, we need to wake ksoftirqd as well.
++ *
++ * CHECKME: Some of the places which do that could be wrapped
++ * into local_bh_disable/enable pairs. Though it's unclear
++ * whether this is worth the effort. To find those places just
++ * raise a WARN() if the condition is met.
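++ *
++ * (Editor's sketch, illustrative only: such a check could be
++ *
++ * WARN_ON_ONCE(!in_irq() && !current->softirq_nestcnt);
++ *
++ * placed at the call sites in question.)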
++ */ ++ if (!current->softirq_nestcnt) ++ wakeup_proper_softirq(nr); ++} ++ ++static inline int ksoftirqd_softirq_pending(void) ++{ ++ return current->softirqs_raised; ++} ++ ++static inline void local_bh_disable_nort(void) { } ++static inline void _local_bh_enable_nort(void) { } ++ ++static inline void ksoftirqd_set_sched_params(unsigned int cpu) ++{ ++ /* Take over all but timer pending softirqs when starting */ ++ local_irq_disable(); ++ current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS; ++ local_irq_enable(); ++} ++ ++static inline void ktimer_softirqd_set_sched_params(unsigned int cpu) ++{ ++ struct sched_param param = { .sched_priority = 1 }; ++ ++ sched_setscheduler(current, SCHED_FIFO, ¶m); ++ ++ /* Take over timer pending softirqs when starting */ ++ local_irq_disable(); ++ current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS; ++ local_irq_enable(); ++} ++ ++static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu, ++ bool online) ++{ ++ struct sched_param param = { .sched_priority = 0 }; ++ ++ sched_setscheduler(current, SCHED_NORMAL, ¶m); ++} ++ ++static int ktimer_softirqd_should_run(unsigned int cpu) ++{ ++ return current->softirqs_raised; ++} ++ ++#endif /* PREEMPT_RT_FULL */ ++/* + * Enter an interrupt context. + */ + void irq_enter(void) +@@ -341,9 +784,9 @@ + * Prevent raise_softirq from needlessly waking up ksoftirqd + * here, as softirq will be serviced on return from interrupt. + */ +- local_bh_disable(); ++ local_bh_disable_nort(); + tick_irq_enter(); +- _local_bh_enable(); ++ _local_bh_enable_nort(); + } + + __irq_enter(); +@@ -351,6 +794,7 @@ + + static inline void invoke_softirq(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (ksoftirqd_running()) + return; + +@@ -373,6 +817,18 @@ + } else { + wakeup_softirqd(); + } ++#else /* PREEMPT_RT_FULL */ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ if (__this_cpu_read(ksoftirqd) && ++ __this_cpu_read(ksoftirqd)->softirqs_raised) ++ wakeup_softirqd(); ++ if (__this_cpu_read(ktimer_softirqd) && ++ __this_cpu_read(ktimer_softirqd)->softirqs_raised) ++ wakeup_timer_softirqd(); ++ local_irq_restore(flags); ++#endif + } + + static inline void tick_irq_exit(void) +@@ -409,26 +865,6 @@ + trace_hardirq_exit(); /* must be last! */ + } + +-/* +- * This function must run with irqs disabled! +- */ +-inline void raise_softirq_irqoff(unsigned int nr) +-{ +- __raise_softirq_irqoff(nr); +- +- /* +- * If we're in an interrupt or softirq, we're done +- * (this also catches softirq-disabled code). We will +- * actually run the softirq once we return from +- * the irq or softirq. +- * +- * Otherwise we wake up ksoftirqd to make sure we +- * schedule the softirq soon. +- */ +- if (!in_interrupt()) +- wakeup_softirqd(); +-} +- + void raise_softirq(unsigned int nr) + { + unsigned long flags; +@@ -438,12 +874,6 @@ + local_irq_restore(flags); + } + +-void __raise_softirq_irqoff(unsigned int nr) +-{ +- trace_softirq_raise(nr); +- or_softirq_pending(1UL << nr); +-} +- + void open_softirq(int nr, void (*action)(struct softirq_action *)) + { + softirq_vec[nr].action = action; +@@ -460,15 +890,45 @@ + static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); + static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); + ++static void inline ++__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) ++{ ++ if (tasklet_trylock(t)) { ++again: ++ /* We may have been preempted before tasklet_trylock ++ * and __tasklet_action may have already run. 
++ * So double check the sched bit while the tasklet
++ * is locked before adding it to the list.
++ */
++ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
++ t->next = NULL;
++ *head->tail = t;
++ head->tail = &(t->next);
++ raise_softirq_irqoff(nr);
++ tasklet_unlock(t);
++ } else {
++ /* This is subtle. If we hit the corner case above,
++ * it is possible that we get preempted right here,
++ * and another task has successfully called
++ * tasklet_schedule(), then this function, and
++ * failed on the trylock. Thus we must be sure
++ * before releasing the tasklet lock, that the
++ * SCHED_BIT is clear. Otherwise the tasklet
++ * may get its SCHED_BIT set, but not added to the
++ * list
++ */
++ if (!tasklet_tryunlock(t))
++ goto again;
++ }
++ }
++}
++
+ void __tasklet_schedule(struct tasklet_struct *t)
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+- t->next = NULL;
+- *__this_cpu_read(tasklet_vec.tail) = t;
+- __this_cpu_write(tasklet_vec.tail, &(t->next));
+- raise_softirq_irqoff(TASKLET_SOFTIRQ);
++ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(__tasklet_schedule);
+@@ -478,10 +938,7 @@
+ unsigned long flags;
+
+ local_irq_save(flags);
+- t->next = NULL;
+- *__this_cpu_read(tasklet_hi_vec.tail) = t;
+- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+- raise_softirq_irqoff(HI_SOFTIRQ);
++ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule);
+@@ -490,82 +947,122 @@
+ {
+ BUG_ON(!irqs_disabled());
+
+- t->next = __this_cpu_read(tasklet_hi_vec.head);
+- __this_cpu_write(tasklet_hi_vec.head, t);
+- __raise_softirq_irqoff(HI_SOFTIRQ);
++ __tasklet_hi_schedule(t);
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+-static __latent_entropy void tasklet_action(struct softirq_action *a)
++void tasklet_enable(struct tasklet_struct *t)
+ {
+- struct tasklet_struct *list;
++ if (!atomic_dec_and_test(&t->count))
++ return;
++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
++ tasklet_schedule(t);
++}
++EXPORT_SYMBOL(tasklet_enable);
+
+- local_irq_disable();
+- list = __this_cpu_read(tasklet_vec.head);
+- __this_cpu_write(tasklet_vec.head, NULL);
+- __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
+- local_irq_enable();
++static void __tasklet_action(struct softirq_action *a,
++ struct tasklet_struct *list)
++{
++ int loops = 1000000;
+
+ while (list) {
+ struct tasklet_struct *t = list;
+
+ list = list->next;
+
+- if (tasklet_trylock(t)) {
+- if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+- &t->state))
+- BUG();
+- t->func(t->data);
+- tasklet_unlock(t);
+- continue;
+- }
+- tasklet_unlock(t);
++ /*
++ * Should always succeed - after a tasklet got on the
++ * list (after getting the SCHED bit set from 0 to 1),
++ * nothing but the tasklet softirq it got queued to can
++ * lock it:
++ */
++ if (!tasklet_trylock(t)) {
++ WARN_ON(1);
++ continue;
+ }
+
+- local_irq_disable();
+ t->next = NULL;
+- *__this_cpu_read(tasklet_vec.tail) = t;
+- __this_cpu_write(tasklet_vec.tail, &(t->next));
+- __raise_softirq_irqoff(TASKLET_SOFTIRQ);
+- local_irq_enable();
++
++ /*
++ * If we cannot handle the tasklet because it's disabled,
++ * mark it as pending. tasklet_enable() will later
++ * re-schedule the tasklet.
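++ * (Editor's note: tasklet_enable(), as reworked above, performs
++ * that re-schedule: it clears TASKLET_STATE_PENDING with
++ * test_and_clear_bit() and calls tasklet_schedule() if the flag
++ * was set.)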
++ */
++ if (unlikely(atomic_read(&t->count))) {
++out_disabled:
++ /* implicit unlock: */
++ wmb();
++ t->state = TASKLET_STATEF_PENDING;
++ continue;
++ }
++
++ /*
++ * From this point on, the tasklet might be rescheduled
++ * on another CPU, but it can only be added to another
++ * CPU's tasklet list if we unlock the tasklet (which we
++ * don't do yet).
++ */
++ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++ WARN_ON(1);
++
++again:
++ t->func(t->data);
++
++ /*
++ * Try to unlock the tasklet. We must use cmpxchg, because
++ * another CPU might have scheduled or disabled the tasklet.
++ * We only allow the STATE_RUN -> 0 transition here.
++ */
++ while (!tasklet_tryunlock(t)) {
++ /*
++ * If it got disabled meanwhile, bail out:
++ */
++ if (atomic_read(&t->count))
++ goto out_disabled;
++ /*
++ * If it got scheduled meanwhile, re-execute
++ * the tasklet function:
++ */
++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++ goto again;
++ if (!--loops) {
++ printk("hm, tasklet state: %08lx\n", t->state);
++ WARN_ON(1);
++ tasklet_unlock(t);
++ break;
++ }
++ }
+ }
+ }
+
++static void tasklet_action(struct softirq_action *a)
++{
++ struct tasklet_struct *list;
++
++ local_irq_disable();
++
++ list = __this_cpu_read(tasklet_vec.head);
++ __this_cpu_write(tasklet_vec.head, NULL);
++ __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
++
++ local_irq_enable();
++
++ __tasklet_action(a, list);
++}
++
+ static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
+ {
+ struct tasklet_struct *list;
+
+ local_irq_disable();
++
+ list = __this_cpu_read(tasklet_hi_vec.head);
+ __this_cpu_write(tasklet_hi_vec.head, NULL);
+ __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
+- local_irq_enable();
+
+- while (list) {
+- struct tasklet_struct *t = list;
+-
+- list = list->next;
+-
+- if (tasklet_trylock(t)) {
+- if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+- &t->state))
+- BUG();
+- t->func(t->data);
+- tasklet_unlock(t);
+- continue;
+- }
+- tasklet_unlock(t);
+- }
++ local_irq_enable();
+
+- local_irq_disable();
+- t->next = NULL;
+- *__this_cpu_read(tasklet_hi_vec.tail) = t;
+- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+- __raise_softirq_irqoff(HI_SOFTIRQ);
+- local_irq_enable();
+- }
++ __tasklet_action(a, list);
+ }
+
+ void tasklet_init(struct tasklet_struct *t,
+@@ -586,7 +1083,7 @@
+
+ while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ do {
+- yield();
++ msleep(1);
+ } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ }
+ tasklet_unlock_wait(t);
+@@ -660,25 +1157,26 @@
+ open_softirq(HI_SOFTIRQ, tasklet_hi_action);
+ }
+
+-static int ksoftirqd_should_run(unsigned int cpu)
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
++void tasklet_unlock_wait(struct tasklet_struct *t)
+ {
+- return local_softirq_pending();
+-}
+-
+-static void run_ksoftirqd(unsigned int cpu)
+-{
+- local_irq_disable();
+- if (local_softirq_pending()) {
++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+ /*
+- * We can safely run softirq on inline stack, as we are not deep
+- * in the task stack here.
++ * Hack for now to avoid this busy-loop: + */ +- __do_softirq(); +- local_irq_enable(); +- cond_resched_rcu_qs(); +- return; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ msleep(1); ++#else ++ barrier(); ++#endif + } +- local_irq_enable(); ++} ++EXPORT_SYMBOL(tasklet_unlock_wait); ++#endif ++ ++static int ksoftirqd_should_run(unsigned int cpu) ++{ ++ return ksoftirqd_softirq_pending(); + } + + #ifdef CONFIG_HOTPLUG_CPU +@@ -745,17 +1243,31 @@ + + static struct smp_hotplug_thread softirq_threads = { + .store = &ksoftirqd, ++ .setup = ksoftirqd_set_sched_params, + .thread_should_run = ksoftirqd_should_run, + .thread_fn = run_ksoftirqd, + .thread_comm = "ksoftirqd/%u", + }; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static struct smp_hotplug_thread softirq_timer_threads = { ++ .store = &ktimer_softirqd, ++ .setup = ktimer_softirqd_set_sched_params, ++ .cleanup = ktimer_softirqd_clr_sched_params, ++ .thread_should_run = ktimer_softirqd_should_run, ++ .thread_fn = run_ksoftirqd, ++ .thread_comm = "ktimersoftd/%u", ++}; ++#endif ++ + static __init int spawn_ksoftirqd(void) + { + cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL, + takeover_tasklets); + BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); +- ++#ifdef CONFIG_PREEMPT_RT_FULL ++ BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads)); ++#endif + return 0; + } + early_initcall(spawn_ksoftirqd); +diff -Nur linux-4.9.28.orig/kernel/stop_machine.c linux-4.9.28/kernel/stop_machine.c +--- linux-4.9.28.orig/kernel/stop_machine.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/stop_machine.c 2017-05-19 03:37:25.198177419 +0200 +@@ -36,7 +36,7 @@ + struct cpu_stopper { + struct task_struct *thread; + +- spinlock_t lock; ++ raw_spinlock_t lock; + bool enabled; /* is this stopper enabled? 
*/ + struct list_head works; /* list of pending works */ + +@@ -78,14 +78,14 @@ + unsigned long flags; + bool enabled; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + enabled = stopper->enabled; + if (enabled) + __cpu_stop_queue_work(stopper, work); + else if (work->done) + cpu_stop_signal_done(work->done); +- spin_unlock_irqrestore(&stopper->lock, flags); + ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); + return enabled; + } + +@@ -231,8 +231,8 @@ + struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); + int err; + retry: +- spin_lock_irq(&stopper1->lock); +- spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); ++ raw_spin_lock_irq(&stopper1->lock); ++ raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); + + err = -ENOENT; + if (!stopper1->enabled || !stopper2->enabled) +@@ -255,8 +255,8 @@ + __cpu_stop_queue_work(stopper1, work1); + __cpu_stop_queue_work(stopper2, work2); + unlock: +- spin_unlock(&stopper2->lock); +- spin_unlock_irq(&stopper1->lock); ++ raw_spin_unlock(&stopper2->lock); ++ raw_spin_unlock_irq(&stopper1->lock); + + if (unlikely(err == -EDEADLK)) { + while (stop_cpus_in_progress) +@@ -448,9 +448,9 @@ + unsigned long flags; + int run; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + run = !list_empty(&stopper->works); +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); + return run; + } + +@@ -461,13 +461,13 @@ + + repeat: + work = NULL; +- spin_lock_irq(&stopper->lock); ++ raw_spin_lock_irq(&stopper->lock); + if (!list_empty(&stopper->works)) { + work = list_first_entry(&stopper->works, + struct cpu_stop_work, list); + list_del_init(&work->list); + } +- spin_unlock_irq(&stopper->lock); ++ raw_spin_unlock_irq(&stopper->lock); + + if (work) { + cpu_stop_fn_t fn = work->fn; +@@ -475,6 +475,8 @@ + struct cpu_stop_done *done = work->done; + int ret; + ++ /* XXX */ ++ + /* cpu stop callbacks must not sleep, make in_atomic() == T */ + preempt_count_inc(); + ret = fn(arg); +@@ -541,7 +543,7 @@ + for_each_possible_cpu(cpu) { + struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + +- spin_lock_init(&stopper->lock); ++ raw_spin_lock_init(&stopper->lock); + INIT_LIST_HEAD(&stopper->works); + } + +diff -Nur linux-4.9.28.orig/kernel/time/hrtimer.c linux-4.9.28/kernel/time/hrtimer.c +--- linux-4.9.28.orig/kernel/time/hrtimer.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/time/hrtimer.c 2017-05-19 03:37:25.198177419 +0200 +@@ -53,6 +53,7 @@ + #include <asm/uaccess.h> + + #include <trace/events/timer.h> ++#include <trace/events/hist.h> + + #include "tick-internal.h" + +@@ -695,6 +696,29 @@ + retrigger_next_event(NULL); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++static struct swork_event clock_set_delay_work; ++ ++static void run_clock_set_delay(struct swork_event *event) ++{ ++ clock_was_set(); ++} ++ ++void clock_was_set_delayed(void) ++{ ++ swork_queue(&clock_set_delay_work); ++} ++ ++static __init int create_clock_set_delay_thread(void) ++{ ++ WARN_ON(swork_get()); ++ INIT_SWORK(&clock_set_delay_work, run_clock_set_delay); ++ return 0; ++} ++early_initcall(create_clock_set_delay_thread); ++#else /* PREEMPT_RT_FULL */ ++ + static void clock_was_set_work(struct work_struct *work) + { + clock_was_set(); +@@ -710,6 +734,7 @@ + { + schedule_work(&hrtimer_work); + } ++#endif + + #else + +@@ -719,11 +744,8 @@ + static inline void hrtimer_switch_to_hres(void) { } + static inline void + 
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+-static inline int hrtimer_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base)
+-{
+- return 0;
+-}
++static inline void hrtimer_reprogram(struct hrtimer *timer,
++ struct hrtimer_clock_base *base) { }
+ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
+ static inline void retrigger_next_event(void *arg) { }
+
+@@ -855,6 +877,32 @@
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_forward);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
++
++/**
++ * hrtimer_wait_for_timer - Wait for a running timer
++ *
++ * @timer: timer to wait for
++ *
++ * The function waits, on the waitqueue of the timer base, while
++ * the timer's callback function is executing. The waitqueue is
++ * woken up after the timer callback function has finished
++ * execution.
++ */
++void hrtimer_wait_for_timer(const struct hrtimer *timer)
++{
++ struct hrtimer_clock_base *base = timer->base;
++
++ if (base && base->cpu_base && !timer->irqsafe)
++ wait_event(base->cpu_base->wait,
++ !(hrtimer_callback_running(timer)));
++}
++
++#else
++# define wake_up_timer_waiters(b) do { } while (0)
++#endif
++
+ /*
+ * enqueue_hrtimer - internal function to (re)start a timer
+ *
+@@ -896,6 +944,11 @@
+ if (!(state & HRTIMER_STATE_ENQUEUED))
+ return;
+
++ if (unlikely(!list_empty(&timer->cb_entry))) {
++ list_del_init(&timer->cb_entry);
++ return;
++ }
++
+ if (!timerqueue_del(&base->active, &timer->node))
+ cpu_base->active_bases &= ~(1 << base->index);
+
+@@ -991,7 +1044,16 @@
+ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
+ timer_stats_hrtimer_set_start_info(timer);
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ {
++ ktime_t now = new_base->get_time();
+
++ if (ktime_to_ns(tim) < ktime_to_ns(now))
++ timer->praecox = now;
++ else
++ timer->praecox = ktime_set(0, 0);
++ }
++#endif
+ leftmost = enqueue_hrtimer(timer, new_base);
+ if (!leftmost)
+ goto unlock;
+@@ -1063,7 +1125,7 @@
+
+ if (ret >= 0)
+ return ret;
+- cpu_relax();
++ hrtimer_wait_for_timer(timer);
+ }
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_cancel);
+@@ -1127,6 +1189,7 @@
+
+ base = hrtimer_clockid_to_base(clock_id);
+ timer->base = &cpu_base->clock_base[base];
++ INIT_LIST_HEAD(&timer->cb_entry);
+ timerqueue_init(&timer->node);
+
+ #ifdef CONFIG_TIMER_STATS
+@@ -1167,6 +1230,7 @@
+ seq = raw_read_seqcount_begin(&cpu_base->seq);
+
+ if (timer->state != HRTIMER_STATE_INACTIVE ||
++ cpu_base->running_soft == timer ||
+ cpu_base->running == timer)
+ return true;
+
+@@ -1265,10 +1329,112 @@
+ cpu_base->running = NULL;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
++ struct hrtimer_clock_base *base)
++{
++ int leftmost;
++
++ if (restart != HRTIMER_NORESTART &&
++ !(timer->state & HRTIMER_STATE_ENQUEUED)) {
++
++ leftmost = enqueue_hrtimer(timer, base);
++ if (!leftmost)
++ return;
++#ifdef CONFIG_HIGH_RES_TIMERS
++ if (!hrtimer_is_hres_active(timer)) {
++ /*
++ * Kick to reschedule the next tick to handle the new timer
++ * on dynticks target.
++ */
++ if (base->cpu_base->nohz_active)
++ wake_up_nohz_cpu(base->cpu_base->cpu);
++ } else {
++
++ hrtimer_reprogram(timer, base);
++ }
++#endif
++ }
++}
++
++/*
++ * The changes in mainline which removed the callback modes from
++ * hrtimer are not yet working with -rt. The non-wakeup_process()
++ * based callbacks which involve sleeping locks need to be treated
++ * separately.
++ */
++static void hrtimer_rt_run_pending(void)
++{
++ enum hrtimer_restart (*fn)(struct hrtimer *);
++ struct hrtimer_cpu_base *cpu_base;
++ struct hrtimer_clock_base *base;
++ struct hrtimer *timer;
++ int index, restart;
++
++ local_irq_disable();
++ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
++
++ raw_spin_lock(&cpu_base->lock);
++
++ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
++ base = &cpu_base->clock_base[index];
++
++ while (!list_empty(&base->expired)) {
++ timer = list_first_entry(&base->expired,
++ struct hrtimer, cb_entry);
++
++ /*
++ * Same as the above __run_hrtimer function,
++ * except that we run with interrupts enabled.
++ */
++ debug_deactivate(timer);
++ cpu_base->running_soft = timer;
++ raw_write_seqcount_barrier(&cpu_base->seq);
++
++ __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
++ timer_stats_account_hrtimer(timer);
++ fn = timer->function;
++
++ raw_spin_unlock_irq(&cpu_base->lock);
++ restart = fn(timer);
++ raw_spin_lock_irq(&cpu_base->lock);
++
++ hrtimer_rt_reprogram(restart, timer, base);
++ raw_write_seqcount_barrier(&cpu_base->seq);
++
++ WARN_ON_ONCE(cpu_base->running_soft != timer);
++ cpu_base->running_soft = NULL;
++ }
++ }
++
++ raw_spin_unlock_irq(&cpu_base->lock);
++
++ wake_up_timer_waiters(cpu_base);
++}
++
++static int hrtimer_rt_defer(struct hrtimer *timer)
++{
++ if (timer->irqsafe)
++ return 0;
++
++ __remove_hrtimer(timer, timer->base, timer->state, 0);
++ list_add_tail(&timer->cb_entry, &timer->base->expired);
++ return 1;
++}
++
++#else
++
++static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
++
++#endif
++
++static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
++
+ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+ {
+ struct hrtimer_clock_base *base = cpu_base->clock_base;
+ unsigned int active = cpu_base->active_bases;
++ int raise = 0;
+
+ for (; active; base++, active >>= 1) {
+ struct timerqueue_node *node;
+@@ -1284,6 +1450,15 @@
+
+ timer = container_of(node, struct hrtimer, node);
+
++ trace_hrtimer_interrupt(raw_smp_processor_id(),
++ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
++ timer->praecox : hrtimer_get_expires(timer),
++ basenow)),
++ current,
++ timer->function == hrtimer_wakeup ?
++ container_of(timer, struct hrtimer_sleeper, ++ timer)->task : NULL); ++ + /* + * The immediate goal for using the softexpires is + * minimizing wakeups, not running timers at the +@@ -1299,9 +1474,14 @@ + if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) + break; + +- __run_hrtimer(cpu_base, base, timer, &basenow); ++ if (!hrtimer_rt_defer(timer)) ++ __run_hrtimer(cpu_base, base, timer, &basenow); ++ else ++ raise = 1; + } + } ++ if (raise) ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); + } + + #ifdef CONFIG_HIGH_RES_TIMERS +@@ -1464,16 +1644,18 @@ + void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) + { + sl->timer.function = hrtimer_wakeup; ++ sl->timer.irqsafe = 1; + sl->task = task; + } + EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); + +-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) ++static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode, ++ unsigned long state) + { + hrtimer_init_sleeper(t, current); + + do { +- set_current_state(TASK_INTERRUPTIBLE); ++ set_current_state(state); + hrtimer_start_expires(&t->timer, mode); + + if (likely(t->task)) +@@ -1515,7 +1697,8 @@ + HRTIMER_MODE_ABS); + hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); + +- if (do_nanosleep(&t, HRTIMER_MODE_ABS)) ++ /* cpu_chill() does not care about restart state. */ ++ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE)) + goto out; + + rmtp = restart->nanosleep.rmtp; +@@ -1532,8 +1715,10 @@ + return ret; + } + +-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, +- const enum hrtimer_mode mode, const clockid_t clockid) ++static long ++__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, ++ const enum hrtimer_mode mode, const clockid_t clockid, ++ unsigned long state) + { + struct restart_block *restart; + struct hrtimer_sleeper t; +@@ -1546,7 +1731,7 @@ + + hrtimer_init_on_stack(&t.timer, clockid, mode); + hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); +- if (do_nanosleep(&t, mode)) ++ if (do_nanosleep(&t, mode, state)) + goto out; + + /* Absolute timers do not update the rmtp value and restart: */ +@@ -1573,6 +1758,12 @@ + return ret; + } + ++long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, ++ const enum hrtimer_mode mode, const clockid_t clockid) ++{ ++ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE); ++} ++ + SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, + struct timespec __user *, rmtp) + { +@@ -1587,6 +1778,26 @@ + return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * Sleep for 1 ms in hope whoever holds what we want will let it go. 
++ */ ++void cpu_chill(void) ++{ ++ struct timespec tu = { ++ .tv_nsec = NSEC_PER_MSEC, ++ }; ++ unsigned int freeze_flag = current->flags & PF_NOFREEZE; ++ ++ current->flags |= PF_NOFREEZE; ++ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC, ++ TASK_UNINTERRUPTIBLE); ++ if (!freeze_flag) ++ current->flags &= ~PF_NOFREEZE; ++} ++EXPORT_SYMBOL(cpu_chill); ++#endif ++ + /* + * Functions related to boot-time initialization: + */ +@@ -1598,10 +1809,14 @@ + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { + cpu_base->clock_base[i].cpu_base = cpu_base; + timerqueue_init_head(&cpu_base->clock_base[i].active); ++ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired); + } + + cpu_base->cpu = cpu; + hrtimer_init_hres(cpu_base); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ init_waitqueue_head(&cpu_base->wait); ++#endif + return 0; + } + +@@ -1671,9 +1886,26 @@ + + #endif /* CONFIG_HOTPLUG_CPU */ + ++#ifdef CONFIG_PREEMPT_RT_BASE ++ ++static void run_hrtimer_softirq(struct softirq_action *h) ++{ ++ hrtimer_rt_run_pending(); ++} ++ ++static void hrtimers_open_softirq(void) ++{ ++ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); ++} ++ ++#else ++static void hrtimers_open_softirq(void) { } ++#endif ++ + void __init hrtimers_init(void) + { + hrtimers_prepare_cpu(smp_processor_id()); ++ hrtimers_open_softirq(); + } + + /** +diff -Nur linux-4.9.28.orig/kernel/time/itimer.c linux-4.9.28/kernel/time/itimer.c +--- linux-4.9.28.orig/kernel/time/itimer.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/time/itimer.c 2017-05-19 03:37:25.198177419 +0200 +@@ -213,6 +213,7 @@ + /* We are sharing ->siglock with it_real_fn() */ + if (hrtimer_try_to_cancel(timer) < 0) { + spin_unlock_irq(&tsk->sighand->siglock); ++ hrtimer_wait_for_timer(&tsk->signal->real_timer); + goto again; + } + expires = timeval_to_ktime(value->it_value); +diff -Nur linux-4.9.28.orig/kernel/time/jiffies.c linux-4.9.28/kernel/time/jiffies.c +--- linux-4.9.28.orig/kernel/time/jiffies.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/time/jiffies.c 2017-05-19 03:37:25.198177419 +0200 +@@ -74,7 +74,8 @@ + .max_cycles = 10, + }; + +-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); ++__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); ++__cacheline_aligned_in_smp seqcount_t jiffies_seq; + + #if (BITS_PER_LONG < 64) + u64 get_jiffies_64(void) +@@ -83,9 +84,9 @@ + u64 ret; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + ret = jiffies_64; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + return ret; + } + EXPORT_SYMBOL(get_jiffies_64); +diff -Nur linux-4.9.28.orig/kernel/time/ntp.c linux-4.9.28/kernel/time/ntp.c +--- linux-4.9.28.orig/kernel/time/ntp.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/time/ntp.c 2017-05-19 03:37:25.198177419 +0200 +@@ -17,6 +17,7 @@ + #include <linux/module.h> + #include <linux/rtc.h> + #include <linux/math64.h> ++#include <linux/swork.h> + + #include "ntp_internal.h" + #include "timekeeping_internal.h" +@@ -568,10 +569,35 @@ + &sync_cmos_work, timespec64_to_jiffies(&next)); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++static void run_clock_set_delay(struct swork_event *event) ++{ ++ queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); ++} ++ ++static struct swork_event ntp_cmos_swork; ++ ++void ntp_notify_cmos_timer(void) ++{ ++ swork_queue(&ntp_cmos_swork); ++} ++ ++static __init int create_cmos_delay_thread(void) ++{ ++ WARN_ON(swork_get()); ++ 
INIT_SWORK(&ntp_cmos_swork, run_clock_set_delay); ++ return 0; ++} ++early_initcall(create_cmos_delay_thread); ++ ++#else ++ + void ntp_notify_cmos_timer(void) + { + queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); + } ++#endif /* CONFIG_PREEMPT_RT_FULL */ + + #else + void ntp_notify_cmos_timer(void) { } +diff -Nur linux-4.9.28.orig/kernel/time/posix-cpu-timers.c linux-4.9.28/kernel/time/posix-cpu-timers.c +--- linux-4.9.28.orig/kernel/time/posix-cpu-timers.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/time/posix-cpu-timers.c 2017-05-19 03:37:25.198177419 +0200 +@@ -3,6 +3,7 @@ + */ + + #include <linux/sched.h> ++#include <linux/sched/rt.h> + #include <linux/posix-timers.h> + #include <linux/errno.h> + #include <linux/math64.h> +@@ -620,7 +621,7 @@ + /* + * Disarm any old timer after extracting its expiry time. + */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + ret = 0; + old_incr = timer->it.cpu.incr; +@@ -1064,7 +1065,7 @@ + /* + * Now re-arm for the new expiry time. + */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + arm_timer(timer); + unlock_task_sighand(p, &flags); + +@@ -1153,13 +1154,13 @@ + * already updated our counts. We need to check if any timers fire now. + * Interrupts are disabled. + */ +-void run_posix_cpu_timers(struct task_struct *tsk) ++static void __run_posix_cpu_timers(struct task_struct *tsk) + { + LIST_HEAD(firing); + struct k_itimer *timer, *next; + unsigned long flags; + +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + /* + * The fast path checks that there are no expired thread or thread +@@ -1213,6 +1214,190 @@ + } + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++#include <linux/kthread.h> ++#include <linux/cpu.h> ++DEFINE_PER_CPU(struct task_struct *, posix_timer_task); ++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); ++ ++static int posix_cpu_timers_thread(void *data) ++{ ++ int cpu = (long)data; ++ ++ BUG_ON(per_cpu(posix_timer_task,cpu) != current); ++ ++ while (!kthread_should_stop()) { ++ struct task_struct *tsk = NULL; ++ struct task_struct *next = NULL; ++ ++ if (cpu_is_offline(cpu)) ++ goto wait_to_die; ++ ++ /* grab task list */ ++ raw_local_irq_disable(); ++ tsk = per_cpu(posix_timer_tasklist, cpu); ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ raw_local_irq_enable(); ++ ++ /* its possible the list is empty, just return */ ++ if (!tsk) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule(); ++ __set_current_state(TASK_RUNNING); ++ continue; ++ } ++ ++ /* Process task list */ ++ while (1) { ++ /* save next */ ++ next = tsk->posix_timer_list; ++ ++ /* run the task timers, clear its ptr and ++ * unreference it ++ */ ++ __run_posix_cpu_timers(tsk); ++ tsk->posix_timer_list = NULL; ++ put_task_struct(tsk); ++ ++ /* check if this is the last on the list */ ++ if (next == tsk) ++ break; ++ tsk = next; ++ } ++ } ++ return 0; ++ ++wait_to_die: ++ /* Wait for kthread_stop */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++static inline int __fastpath_timer_check(struct task_struct *tsk) ++{ ++ /* tsk == current, ensure it is safe to use ->signal/sighand */ ++ if (unlikely(tsk->exit_state)) ++ return 0; ++ ++ if (!task_cputime_zero(&tsk->cputime_expires)) ++ return 1; ++ ++ if (!task_cputime_zero(&tsk->signal->cputime_expires)) ++ return 1; ++ ++ return 0; ++} ++ ++void 
run_posix_cpu_timers(struct task_struct *tsk)
++{
++ unsigned long cpu = smp_processor_id();
++ struct task_struct *tasklist;
++
++ BUG_ON(!irqs_disabled());
++ if(!per_cpu(posix_timer_task, cpu))
++ return;
++ /* get per-cpu references */
++ tasklist = per_cpu(posix_timer_tasklist, cpu);
++
++ /* check to see if we're already queued */
++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
++ get_task_struct(tsk);
++ if (tasklist) {
++ tsk->posix_timer_list = tasklist;
++ } else {
++ /*
++ * The list is terminated by a self-pointing
++ * task_struct
++ */
++ tsk->posix_timer_list = tsk;
++ }
++ per_cpu(posix_timer_tasklist, cpu) = tsk;
++
++ wake_up_process(per_cpu(posix_timer_task, cpu));
++ }
++}
++
++/*
++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
++ * Here we can start up the necessary migration thread for the new CPU.
++ */
++static int posix_cpu_thread_call(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ int cpu = (long)hcpu;
++ struct task_struct *p;
++ struct sched_param param;
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ p = kthread_create(posix_cpu_timers_thread, hcpu,
++ "posixcputmr/%d",cpu);
++ if (IS_ERR(p))
++ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
++ kthread_bind(p, cpu);
++ /* Must be high prio to avoid getting starved */
++ param.sched_priority = MAX_RT_PRIO-1;
++ sched_setscheduler(p, SCHED_FIFO, &param);
++ per_cpu(posix_timer_task,cpu) = p;
++ break;
++ case CPU_ONLINE:
++ /* Strictly unnecessary, as first user will wake it. */
++ wake_up_process(per_cpu(posix_timer_task,cpu));
++ break;
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_UP_CANCELED:
++ /* Unbind it from offline cpu so it can run. Fall thru. */
++ kthread_bind(per_cpu(posix_timer_task, cpu),
++ cpumask_any(cpu_online_mask));
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++ case CPU_DEAD:
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++#endif
++ }
++ return NOTIFY_OK;
++}
++
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block posix_cpu_thread_notifier = {
++ .notifier_call = posix_cpu_thread_call,
++ .priority = 10
++};
++
++static int __init posix_cpu_thread_init(void)
++{
++ void *hcpu = (void *)(long)smp_processor_id();
++ /* Start one for boot CPU. */
++ unsigned long cpu;
++
++ /* init the per-cpu posix_timer_tasklets */
++ for_each_possible_cpu(cpu)
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
++ register_cpu_notifier(&posix_cpu_thread_notifier);
++ return 0;
++}
++early_initcall(posix_cpu_thread_init);
++#else /* CONFIG_PREEMPT_RT_BASE */
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ __run_posix_cpu_timers(tsk);
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ /*
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+ * The tsk->sighand->siglock must be held by the caller.
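A stand-alone illustration, not part of the patch itself: the RT replacement for run_posix_cpu_timers() above queues tasks for the per-CPU posixcputmr/N kthread on a singly-linked list threaded through tsk->posix_timer_list, where NULL means "not queued" and an entry that points to itself marks the tail. The sketch below demonstrates the same list idiom in plain, userspace C; the names node, push and drain are invented for the example.

#include <stdio.h>

struct node {
	int id;
	struct node *next;	/* NULL: not queued; points to self: tail */
};

/* stands in for per_cpu(posix_timer_tasklist, cpu) */
static struct node *head;

static void push(struct node *n)
{
	if (n->next)			/* already queued, like tsk->posix_timer_list */
		return;
	n->next = head ? head : n;	/* empty list: terminate with a self-pointer */
	head = n;
}

static void drain(void)
{
	struct node *n = head;

	head = NULL;			/* grab the whole list, as the kthread does */
	while (n) {
		struct node *next = n->next;

		printf("processing node %d\n", n->id);
		n->next = NULL;		/* mark "not queued" again */
		if (next == n)		/* self-pointer: this was the last entry */
			break;
		n = next;
	}
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };

	push(&a);
	push(&b);
	push(&c);
	drain();	/* prints nodes 3, 2, 1 - LIFO, like the kernel list */
	return 0;
}

The self-pointing terminator lets a single pointer field serve both as the link and as the "am I queued?" flag, which is why run_posix_cpu_timers() needs only the !tsk->posix_timer_list test to prevent double-queueing.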
+diff -Nur linux-4.9.28.orig/kernel/time/posix-timers.c linux-4.9.28/kernel/time/posix-timers.c +--- linux-4.9.28.orig/kernel/time/posix-timers.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/time/posix-timers.c 2017-05-19 03:37:25.198177419 +0200 +@@ -506,6 +506,7 @@ + static struct pid *good_sigevent(sigevent_t * event) + { + struct task_struct *rtn = current->group_leader; ++ int sig = event->sigev_signo; + + if ((event->sigev_notify & SIGEV_THREAD_ID ) && + (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || +@@ -514,7 +515,8 @@ + return NULL; + + if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && +- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) ++ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) || ++ sig_kernel_coredump(sig))) + return NULL; + + return task_pid(rtn); +@@ -826,6 +828,20 @@ + return overrun; + } + ++/* ++ * Protected by RCU! ++ */ ++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (kc->timer_set == common_timer_set) ++ hrtimer_wait_for_timer(&timr->it.real.timer); ++ else ++ /* FIXME: Whacky hack for posix-cpu-timers */ ++ schedule_timeout(1); ++#endif ++} ++ + /* Set a POSIX.1b interval timer. */ + /* timr->it_lock is taken. */ + static int +@@ -903,6 +919,7 @@ + if (!timr) + return -EINVAL; + ++ rcu_read_lock(); + kc = clockid_to_kclock(timr->it_clock); + if (WARN_ON_ONCE(!kc || !kc->timer_set)) + error = -EINVAL; +@@ -911,9 +928,12 @@ + + unlock_timer(timr, flag); + if (error == TIMER_RETRY) { ++ timer_wait_for_callback(kc, timr); + rtn = NULL; // We already got the old time... ++ rcu_read_unlock(); + goto retry; + } ++ rcu_read_unlock(); + + if (old_setting && !error && + copy_to_user(old_setting, &old_spec, sizeof (old_spec))) +@@ -951,10 +971,15 @@ + if (!timer) + return -EINVAL; + ++ rcu_read_lock(); + if (timer_delete_hook(timer) == TIMER_RETRY) { + unlock_timer(timer, flags); ++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), ++ timer); ++ rcu_read_unlock(); + goto retry_delete; + } ++ rcu_read_unlock(); + + spin_lock(¤t->sighand->siglock); + list_del(&timer->list); +@@ -980,8 +1005,18 @@ + retry_delete: + spin_lock_irqsave(&timer->it_lock, flags); + ++ /* On RT we can race with a deletion */ ++ if (!timer->it_signal) { ++ unlock_timer(timer, flags); ++ return; ++ } ++ + if (timer_delete_hook(timer) == TIMER_RETRY) { ++ rcu_read_lock(); + unlock_timer(timer, flags); ++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), ++ timer); ++ rcu_read_unlock(); + goto retry_delete; + } + list_del(&timer->list); +diff -Nur linux-4.9.28.orig/kernel/time/tick-broadcast-hrtimer.c linux-4.9.28/kernel/time/tick-broadcast-hrtimer.c +--- linux-4.9.28.orig/kernel/time/tick-broadcast-hrtimer.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/time/tick-broadcast-hrtimer.c 2017-05-19 03:37:25.198177419 +0200 +@@ -107,5 +107,6 @@ + { + hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + bctimer.function = bc_handler; ++ bctimer.irqsafe = true; + clockevents_register_device(&ce_broadcast_hrtimer); + } +diff -Nur linux-4.9.28.orig/kernel/time/tick-common.c linux-4.9.28/kernel/time/tick-common.c +--- linux-4.9.28.orig/kernel/time/tick-common.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/time/tick-common.c 2017-05-19 03:37:25.198177419 +0200 +@@ -79,13 +79,15 @@ + static void tick_periodic(int cpu) + { + if (tick_do_timer_cpu == cpu) { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); 
++ write_seqcount_begin(&jiffies_seq); + + /* Keep track of the next tick event */ + tick_next_period = ktime_add(tick_next_period, tick_period); + + do_timer(1); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } + +@@ -157,9 +159,9 @@ + ktime_t next; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + next = tick_next_period; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + + clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); + +diff -Nur linux-4.9.28.orig/kernel/time/tick-sched.c linux-4.9.28/kernel/time/tick-sched.c +--- linux-4.9.28.orig/kernel/time/tick-sched.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/time/tick-sched.c 2017-05-19 03:37:25.198177419 +0200 +@@ -62,7 +62,8 @@ + return; + + /* Reevaluate with jiffies_lock held */ +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + delta = ktime_sub(now, last_jiffies_update); + if (delta.tv64 >= tick_period.tv64) { +@@ -85,10 +86,12 @@ + /* Keep the tick_next_period variable up to date */ + tick_next_period = ktime_add(last_jiffies_update, tick_period); + } else { +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + return; + } +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } + +@@ -99,12 +102,14 @@ + { + ktime_t period; + +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + /* Did we start the jiffies update yet ? */ + if (last_jiffies_update.tv64 == 0) + last_jiffies_update = tick_next_period; + period = last_jiffies_update; +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + return period; + } + +@@ -215,6 +220,7 @@ + + static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { + .func = nohz_full_kick_func, ++ .flags = IRQ_WORK_HARD_IRQ, + }; + + /* +@@ -673,10 +679,10 @@ + + /* Read jiffies and the time when jiffies were updated last */ + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + basemono = last_jiffies_update.tv64; + basejiff = jiffies; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + ts->last_jiffies = basejiff; + + if (rcu_needs_cpu(basemono, &next_rcu) || +@@ -877,14 +883,7 @@ + return false; + + if (unlikely(local_softirq_pending() && cpu_online(cpu))) { +- static int ratelimit; +- +- if (ratelimit < 10 && +- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { +- pr_warn("NOHZ: local_softirq_pending %02x\n", +- (unsigned int) local_softirq_pending()); +- ratelimit++; +- } ++ softirq_check_pending_idle(); + return false; + } + +@@ -1193,6 +1192,7 @@ + * Emulate tick processing via per-CPU hrtimers: + */ + hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ ts->sched_timer.irqsafe = 1; + ts->sched_timer.function = tick_sched_timer; + + /* Get the next period (per-CPU) */ +diff -Nur linux-4.9.28.orig/kernel/time/timekeeping.c linux-4.9.28/kernel/time/timekeeping.c +--- linux-4.9.28.orig/kernel/time/timekeeping.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/time/timekeeping.c 2017-05-19 03:37:25.198177419 +0200 +@@ -2328,8 +2328,10 @@ + */ + void xtime_update(unsigned long ticks) + { +- 
write_seqlock(&jiffies_lock);
++ raw_spin_lock(&jiffies_lock);
++ write_seqcount_begin(&jiffies_seq);
+ do_timer(ticks);
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ update_wall_time();
+ }
+diff -Nur linux-4.9.28.orig/kernel/time/timekeeping.h linux-4.9.28/kernel/time/timekeeping.h
+--- linux-4.9.28.orig/kernel/time/timekeeping.h 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/kernel/time/timekeeping.h 2017-05-19 03:37:25.202177573 +0200
+@@ -19,7 +19,8 @@
+ extern void do_timer(unsigned long ticks);
+ extern void update_wall_time(void);
+
+-extern seqlock_t jiffies_lock;
++extern raw_spinlock_t jiffies_lock;
++extern seqcount_t jiffies_seq;
+
+ #define CS_NAME_LEN 32
+
+diff -Nur linux-4.9.28.orig/kernel/time/timer.c linux-4.9.28/kernel/time/timer.c
+--- linux-4.9.28.orig/kernel/time/timer.c 2017-05-14 14:00:37.000000000 +0200
++++ linux-4.9.28/kernel/time/timer.c 2017-05-19 03:37:25.202177573 +0200
+@@ -193,8 +193,11 @@
+ #endif
+
+ struct timer_base {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct timer_list *running_timer;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ struct swait_queue_head wait_for_running_timer;
++#endif
+ unsigned long clk;
+ unsigned long next_expiry;
+ unsigned int cpu;
+@@ -203,6 +206,8 @@
+ bool is_idle;
+ DECLARE_BITMAP(pending_map, WHEEL_SIZE);
+ struct hlist_head vectors[WHEEL_SIZE];
++ struct hlist_head expired_lists[LVL_DEPTH];
++ int expired_count;
+ } ____cacheline_aligned;
+
+ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
+@@ -948,10 +953,10 @@
+
+ if (!(tf & TIMER_MIGRATING)) {
+ base = get_timer_base(tf);
+- spin_lock_irqsave(&base->lock, *flags);
++ raw_spin_lock_irqsave(&base->lock, *flags);
+ if (timer->flags == tf)
+ return base;
+- spin_unlock_irqrestore(&base->lock, *flags);
++ raw_spin_unlock_irqrestore(&base->lock, *flags);
+ }
+ cpu_relax();
+ }
+@@ -1023,9 +1028,9 @@
+ /* See the comment in lock_timer_base() */
+ timer->flags |= TIMER_MIGRATING;
+
+- spin_unlock(&base->lock);
++ raw_spin_unlock(&base->lock);
+ base = new_base;
+- spin_lock(&base->lock);
++ raw_spin_lock(&base->lock);
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | base->cpu);
+ }
+@@ -1050,7 +1055,7 @@
+ }
+
+ out_unlock:
+- spin_unlock_irqrestore(&base->lock, flags);
++ raw_spin_unlock_irqrestore(&base->lock, flags);
+
+ return ret;
+ }
+@@ -1144,19 +1149,46 @@
+ if (base != new_base) {
+ timer->flags |= TIMER_MIGRATING;
+
+- spin_unlock(&base->lock);
++ raw_spin_unlock(&base->lock);
+ base = new_base;
+- spin_lock(&base->lock);
++ raw_spin_lock(&base->lock);
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | cpu);
+ }
+
+ debug_activate(timer, timer->expires);
+ internal_add_timer(base, timer);
+- spin_unlock_irqrestore(&base->lock, flags);
++ raw_spin_unlock_irqrestore(&base->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(add_timer_on);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * Wait for a running timer
++ */
++static void wait_for_running_timer(struct timer_list *timer)
++{
++ struct timer_base *base;
++ u32 tf = timer->flags;
++
++ if (tf & TIMER_MIGRATING)
++ return;
++
++ base = get_timer_base(tf);
++ swait_event(base->wait_for_running_timer,
++ base->running_timer != timer);
++}
++
++# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer)
++#else
++static inline void wait_for_running_timer(struct timer_list *timer)
++{
++ cpu_relax();
++}
++
++# define wakeup_timer_waiters(b) do { } while (0)
++#endif
++
+ /**
+ * del_timer - deactivate a timer.
+ * @timer: the timer to be deactivated +@@ -1180,7 +1212,7 @@ + if (timer_pending(timer)) { + base = lock_timer_base(timer, &flags); + ret = detach_if_pending(timer, base, true); +- spin_unlock_irqrestore(&base->lock, flags); ++ raw_spin_unlock_irqrestore(&base->lock, flags); + } + + return ret; +@@ -1208,13 +1240,13 @@ + timer_stats_timer_clear_start_info(timer); + ret = detach_if_pending(timer, base, true); + } +- spin_unlock_irqrestore(&base->lock, flags); ++ raw_spin_unlock_irqrestore(&base->lock, flags); + + return ret; + } + EXPORT_SYMBOL(try_to_del_timer_sync); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + /** + * del_timer_sync - deactivate a timer and wait for the handler to finish. + * @timer: the timer to be deactivated +@@ -1274,7 +1306,7 @@ + int ret = try_to_del_timer_sync(timer); + if (ret >= 0) + return ret; +- cpu_relax(); ++ wait_for_running_timer(timer); + } + } + EXPORT_SYMBOL(del_timer_sync); +@@ -1323,7 +1355,8 @@ + } + } + +-static void expire_timers(struct timer_base *base, struct hlist_head *head) ++static inline void __expire_timers(struct timer_base *base, ++ struct hlist_head *head) + { + while (!hlist_empty(head)) { + struct timer_list *timer; +@@ -1339,33 +1372,53 @@ + fn = timer->function; + data = timer->data; + +- if (timer->flags & TIMER_IRQSAFE) { +- spin_unlock(&base->lock); ++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && ++ timer->flags & TIMER_IRQSAFE) { ++ raw_spin_unlock(&base->lock); + call_timer_fn(timer, fn, data); +- spin_lock(&base->lock); ++ base->running_timer = NULL; ++ raw_spin_lock(&base->lock); + } else { +- spin_unlock_irq(&base->lock); ++ raw_spin_unlock_irq(&base->lock); + call_timer_fn(timer, fn, data); +- spin_lock_irq(&base->lock); ++ base->running_timer = NULL; ++ raw_spin_lock_irq(&base->lock); + } + } + } + +-static int __collect_expired_timers(struct timer_base *base, +- struct hlist_head *heads) ++static void expire_timers(struct timer_base *base) ++{ ++ struct hlist_head *head; ++ ++ while (base->expired_count--) { ++ head = base->expired_lists + base->expired_count; ++ __expire_timers(base, head); ++ } ++ base->expired_count = 0; ++} ++ ++static void __collect_expired_timers(struct timer_base *base) + { + unsigned long clk = base->clk; + struct hlist_head *vec; +- int i, levels = 0; ++ int i; + unsigned int idx; + ++ /* ++ * expire_timers() must be called at least once before we can ++ * collect more timers ++ */ ++ if (WARN_ON(base->expired_count)) ++ return; ++ + for (i = 0; i < LVL_DEPTH; i++) { + idx = (clk & LVL_MASK) + i * LVL_SIZE; + + if (__test_and_clear_bit(idx, base->pending_map)) { + vec = base->vectors + idx; +- hlist_move_list(vec, heads++); +- levels++; ++ hlist_move_list(vec, ++ &base->expired_lists[base->expired_count++]); + } + /* Is it time to look at the next level? 
*/ + if (clk & LVL_CLK_MASK) +@@ -1373,7 +1426,6 @@ + /* Shift clock for the next level granularity */ + clk >>= LVL_CLK_SHIFT; + } +- return levels; + } + + #ifdef CONFIG_NO_HZ_COMMON +@@ -1515,7 +1567,7 @@ + if (cpu_is_offline(smp_processor_id())) + return expires; + +- spin_lock(&base->lock); ++ raw_spin_lock(&base->lock); + nextevt = __next_timer_interrupt(base); + is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); + base->next_expiry = nextevt; +@@ -1543,7 +1595,7 @@ + if ((expires - basem) > TICK_NSEC) + base->is_idle = true; + } +- spin_unlock(&base->lock); ++ raw_spin_unlock(&base->lock); + + return cmp_next_hrtimer_event(basem, expires); + } +@@ -1566,8 +1618,7 @@ + base->is_idle = false; + } + +-static int collect_expired_timers(struct timer_base *base, +- struct hlist_head *heads) ++static void collect_expired_timers(struct timer_base *base) + { + /* + * NOHZ optimization. After a long idle sleep we need to forward the +@@ -1584,20 +1635,49 @@ + if (time_after(next, jiffies)) { + /* The call site will increment clock! */ + base->clk = jiffies - 1; +- return 0; ++ return; + } + base->clk = next; + } +- return __collect_expired_timers(base, heads); ++ __collect_expired_timers(base); + } + #else +-static inline int collect_expired_timers(struct timer_base *base, +- struct hlist_head *heads) ++static inline void collect_expired_timers(struct timer_base *base) + { +- return __collect_expired_timers(base, heads); ++ __collect_expired_timers(base); + } + #endif + ++static int find_expired_timers(struct timer_base *base) ++{ ++ const unsigned long int end_clk = jiffies; ++ ++ while (!base->expired_count && time_after_eq(end_clk, base->clk)) { ++ collect_expired_timers(base); ++ base->clk++; ++ } ++ ++ return base->expired_count; ++} ++ ++/* Called from CPU tick routine to quickly collect expired timers */ ++static int tick_find_expired(struct timer_base *base) ++{ ++ int count; ++ ++ raw_spin_lock(&base->lock); ++ ++ if (unlikely(time_after(jiffies, base->clk + HZ))) { ++ /* defer to ktimersoftd; don't spend too long in irq context */ ++ count = -1; ++ } else ++ count = find_expired_timers(base); ++ ++ raw_spin_unlock(&base->lock); ++ ++ return count; ++} ++ + /* + * Called from the timer interrupt handler to charge one tick to the current + * process. user_tick is 1 if the tick is user time, 0 for system. +@@ -1608,13 +1688,13 @@ + + /* Note: this timer irq context must be accounted for as well. 
*/ + account_process_tick(p, user_tick); ++ scheduler_tick(); + run_local_timers(); + rcu_check_callbacks(user_tick); +-#ifdef CONFIG_IRQ_WORK ++#if defined(CONFIG_IRQ_WORK) + if (in_irq()) + irq_work_tick(); + #endif +- scheduler_tick(); + run_posix_cpu_timers(p); + } + +@@ -1624,24 +1704,13 @@ + */ + static inline void __run_timers(struct timer_base *base) + { +- struct hlist_head heads[LVL_DEPTH]; +- int levels; +- +- if (!time_after_eq(jiffies, base->clk)) +- return; ++ raw_spin_lock_irq(&base->lock); + +- spin_lock_irq(&base->lock); ++ while (find_expired_timers(base)) ++ expire_timers(base); + +- while (time_after_eq(jiffies, base->clk)) { +- +- levels = collect_expired_timers(base, heads); +- base->clk++; +- +- while (levels--) +- expire_timers(base, heads + levels); +- } +- base->running_timer = NULL; +- spin_unlock_irq(&base->lock); ++ raw_spin_unlock_irq(&base->lock); ++ wakeup_timer_waiters(base); + } + + /* +@@ -1651,6 +1720,8 @@ + { + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); + ++ irq_work_tick_soft(); ++ + __run_timers(base); + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); +@@ -1665,12 +1736,12 @@ + + hrtimer_run_queues(); + /* Raise the softirq only if required. */ +- if (time_before(jiffies, base->clk)) { ++ if (time_before(jiffies, base->clk) || !tick_find_expired(base)) { + if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active) + return; + /* CPU is awake, so check the deferrable base. */ + base++; +- if (time_before(jiffies, base->clk)) ++ if (time_before(jiffies, base->clk) || !tick_find_expired(base)) + return; + } + raise_softirq(TIMER_SOFTIRQ); +@@ -1836,16 +1907,17 @@ + * The caller is globally serialized and nobody else + * takes two locks at once, deadlock is not possible. + */ +- spin_lock_irq(&new_base->lock); +- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); ++ raw_spin_lock_irq(&new_base->lock); ++ raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); + + BUG_ON(old_base->running_timer); ++ BUG_ON(old_base->expired_count); + + for (i = 0; i < WHEEL_SIZE; i++) + migrate_timer_list(new_base, old_base->vectors + i); + +- spin_unlock(&old_base->lock); +- spin_unlock_irq(&new_base->lock); ++ raw_spin_unlock(&old_base->lock); ++ raw_spin_unlock_irq(&new_base->lock); + put_cpu_ptr(&timer_bases); + } + return 0; +@@ -1861,8 +1933,12 @@ + for (i = 0; i < NR_BASES; i++) { + base = per_cpu_ptr(&timer_bases[i], cpu); + base->cpu = cpu; +- spin_lock_init(&base->lock); ++ raw_spin_lock_init(&base->lock); + base->clk = jiffies; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ init_swait_queue_head(&base->wait_for_running_timer); ++#endif ++ base->expired_count = 0; + } + } + +diff -Nur linux-4.9.28.orig/kernel/trace/Kconfig linux-4.9.28/kernel/trace/Kconfig +--- linux-4.9.28.orig/kernel/trace/Kconfig 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/trace/Kconfig 2017-05-19 03:37:25.202177573 +0200 +@@ -182,6 +182,24 @@ + enabled. This option and the preempt-off timing option can be + used together or separately.) + ++config INTERRUPT_OFF_HIST ++ bool "Interrupts-off Latency Histogram" ++ depends on IRQSOFF_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the duration of time periods with interrupts disabled. The ++ histograms are disabled by default. 
To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff ++ ++ If PREEMPT_OFF_HIST is also selected, additional histograms (one ++ per cpu) are generated that accumulate the duration of time periods ++ when both interrupts and preemption are disabled. The histogram data ++ will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/irqsoff ++ + config PREEMPT_TRACER + bool "Preemption-off Latency Tracer" + default n +@@ -206,6 +224,24 @@ + enabled. This option and the irqs-off timing option can be + used together or separately.) + ++config PREEMPT_OFF_HIST ++ bool "Preemption-off Latency Histogram" ++ depends on PREEMPT_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the duration of time periods with preemption disabled. The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff ++ ++ If INTERRUPT_OFF_HIST is also selected, additional histograms (one ++ per cpu) are generated that accumulate the duration of time periods ++ when both interrupts and preemption are disabled. The histogram data ++ will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/preemptoff ++ + config SCHED_TRACER + bool "Scheduling Latency Tracer" + select GENERIC_TRACER +@@ -251,6 +287,74 @@ + file. Every time a latency is greater than tracing_thresh, it will + be recorded into the ring buffer. + ++config WAKEUP_LATENCY_HIST ++ bool "Scheduling Latency Histogram" ++ depends on SCHED_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the scheduling latency of the highest priority task. ++ The histograms are disabled by default. To enable them, write a ++ non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/wakeup ++ ++ Two different algorithms are used, one to determine the latency of ++ processes that exclusively use the highest priority of the system and ++ another one to determine the latency of processes that share the ++ highest system priority with other processes. The former is used to ++ improve hardware and system software, the latter to optimize the ++ priority design of a given system. The histogram data will be ++ located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/wakeup ++ ++ and ++ ++ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio ++ ++ If both Scheduling Latency Histogram and Missed Timer Offsets ++ Histogram are selected, additional histogram data will be collected ++ that contain, in addition to the wakeup latency, the timer latency, in ++ case the wakeup was triggered by an expired timer. These histograms ++ are available in the ++ ++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup ++ ++ directory. They reflect the apparent interrupt and scheduling latency ++ and are best suitable to determine the worst-case latency of a given ++ system. To enable these histograms, write a non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup ++ ++config MISSED_TIMER_OFFSETS_HIST ++ depends on HIGH_RES_TIMERS ++ select GENERIC_TRACER ++ bool "Missed Timer Offsets Histogram" ++ help ++ Generate a histogram of missed timer offsets in microseconds. The ++ histograms are disabled by default. 
To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets ++ ++ The histogram data will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets ++ ++ If both Scheduling Latency Histogram and Missed Timer Offsets ++ Histogram are selected, additional histogram data will be collected ++ that contain, in addition to the wakeup latency, the timer latency, in ++ case the wakeup was triggered by an expired timer. These histograms ++ are available in the ++ ++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup ++ ++ directory. They reflect the apparent interrupt and scheduling latency ++ and are best suitable to determine the worst-case latency of a given ++ system. To enable these histograms, write a non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup ++ + config ENABLE_DEFAULT_TRACERS + bool "Trace process context switches and events" + depends on !GENERIC_TRACER +diff -Nur linux-4.9.28.orig/kernel/trace/latency_hist.c linux-4.9.28/kernel/trace/latency_hist.c +--- linux-4.9.28.orig/kernel/trace/latency_hist.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.9.28/kernel/trace/latency_hist.c 2017-05-19 03:37:25.202177573 +0200 +@@ -0,0 +1,1178 @@ ++/* ++ * kernel/trace/latency_hist.c ++ * ++ * Add support for histograms of preemption-off latency and ++ * interrupt-off latency and wakeup latency, it depends on ++ * Real-Time Preemption Support. ++ * ++ * Copyright (C) 2005 MontaVista Software, Inc. ++ * Yi Yang <yyang@ch.mvista.com> ++ * ++ * Converted to work with the new latency tracer. ++ * Copyright (C) 2008 Red Hat, Inc. ++ * Steven Rostedt <srostedt@redhat.com> ++ * ++ */ ++#include <linux/module.h> ++#include <linux/debugfs.h> ++#include <linux/seq_file.h> ++#include <linux/percpu.h> ++#include <linux/kallsyms.h> ++#include <linux/uaccess.h> ++#include <linux/sched.h> ++#include <linux/sched/rt.h> ++#include <linux/slab.h> ++#include <linux/atomic.h> ++#include <asm/div64.h> ++ ++#include "trace.h" ++#include <trace/events/sched.h> ++ ++#define NSECS_PER_USECS 1000L ++ ++#define CREATE_TRACE_POINTS ++#include <trace/events/hist.h> ++ ++enum { ++ IRQSOFF_LATENCY = 0, ++ PREEMPTOFF_LATENCY, ++ PREEMPTIRQSOFF_LATENCY, ++ WAKEUP_LATENCY, ++ WAKEUP_LATENCY_SHAREDPRIO, ++ MISSED_TIMER_OFFSETS, ++ TIMERANDWAKEUP_LATENCY, ++ MAX_LATENCY_TYPE, ++}; ++ ++#define MAX_ENTRY_NUM 10240 ++ ++struct hist_data { ++ atomic_t hist_mode; /* 0 log, 1 don't log */ ++ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ ++ long min_lat; ++ long max_lat; ++ unsigned long long below_hist_bound_samples; ++ unsigned long long above_hist_bound_samples; ++ long long accumulate_lat; ++ unsigned long long total_samples; ++ unsigned long long hist_array[MAX_ENTRY_NUM]; ++}; ++ ++struct enable_data { ++ int latency_type; ++ int enabled; ++}; ++ ++static char *latency_hist_dir_root = "latency_hist"; ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); ++static char *irqsoff_hist_dir = "irqsoff"; ++static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); ++static DEFINE_PER_CPU(int, hist_irqsoff_counting); ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); ++static char *preemptoff_hist_dir = "preemptoff"; ++static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); ++static DEFINE_PER_CPU(int, hist_preemptoff_counting); ++#endif ++ ++#if defined(CONFIG_PREEMPT_OFF_HIST) && 
defined(CONFIG_INTERRUPT_OFF_HIST) ++static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); ++static char *preemptirqsoff_hist_dir = "preemptirqsoff"; ++static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); ++static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting); ++#endif ++ ++#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) ++static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start); ++static struct enable_data preemptirqsoff_enabled_data = { ++ .latency_type = PREEMPTIRQSOFF_LATENCY, ++ .enabled = 0, ++}; ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++struct maxlatproc_data { ++ char comm[FIELD_SIZEOF(struct task_struct, comm)]; ++ char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; ++ int pid; ++ int current_pid; ++ int prio; ++ int current_prio; ++ long latency; ++ long timeroffset; ++ cycle_t timestamp; ++}; ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); ++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); ++static char *wakeup_latency_hist_dir = "wakeup"; ++static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; ++static notrace void probe_wakeup_latency_hist_start(void *v, ++ struct task_struct *p); ++static notrace void probe_wakeup_latency_hist_stop(void *v, ++ bool preempt, struct task_struct *prev, struct task_struct *next); ++static notrace void probe_sched_migrate_task(void *, ++ struct task_struct *task, int cpu); ++static struct enable_data wakeup_latency_enabled_data = { ++ .latency_type = WAKEUP_LATENCY, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); ++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); ++static DEFINE_PER_CPU(struct task_struct *, wakeup_task); ++static DEFINE_PER_CPU(int, wakeup_sharedprio); ++static unsigned long wakeup_pid; ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); ++static char *missed_timer_offsets_dir = "missed_timer_offsets"; ++static notrace void probe_hrtimer_interrupt(void *v, int cpu, ++ long long offset, struct task_struct *curr, struct task_struct *task); ++static struct enable_data missed_timer_offsets_enabled_data = { ++ .latency_type = MISSED_TIMER_OFFSETS, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); ++static unsigned long missed_timer_offsets_pid; ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist); ++static char *timerandwakeup_latency_hist_dir = "timerandwakeup"; ++static struct enable_data timerandwakeup_enabled_data = { ++ .latency_type = TIMERANDWAKEUP_LATENCY, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc); ++#endif ++ ++void notrace latency_hist(int latency_type, int cpu, long latency, ++ long timeroffset, cycle_t stop, ++ struct task_struct *p) ++{ ++ struct hist_data *my_hist; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ struct maxlatproc_data *mp = NULL; ++#endif ++ ++ if (!cpu_possible(cpu) || latency_type < 0 || ++ latency_type >= MAX_LATENCY_TYPE) ++ return; ++ ++ switch (latency_type) { ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ case IRQSOFF_LATENCY: ++ my_hist = &per_cpu(irqsoff_hist, cpu); 
++ break; ++#endif ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ case PREEMPTOFF_LATENCY: ++ my_hist = &per_cpu(preemptoff_hist, cpu); ++ break; ++#endif ++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ my_hist = &per_cpu(preemptirqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ my_hist = &per_cpu(wakeup_latency_hist, cpu); ++ mp = &per_cpu(wakeup_maxlatproc, cpu); ++ break; ++ case WAKEUP_LATENCY_SHAREDPRIO: ++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ my_hist = &per_cpu(missed_timer_offsets, cpu); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu); ++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); ++ break; ++#endif ++ ++ default: ++ return; ++ } ++ ++ latency += my_hist->offset; ++ ++ if (atomic_read(&my_hist->hist_mode) == 0) ++ return; ++ ++ if (latency < 0 || latency >= MAX_ENTRY_NUM) { ++ if (latency < 0) ++ my_hist->below_hist_bound_samples++; ++ else ++ my_hist->above_hist_bound_samples++; ++ } else ++ my_hist->hist_array[latency]++; ++ ++ if (unlikely(latency > my_hist->max_lat || ++ my_hist->min_lat == LONG_MAX)) { ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ if (latency_type == WAKEUP_LATENCY || ++ latency_type == WAKEUP_LATENCY_SHAREDPRIO || ++ latency_type == MISSED_TIMER_OFFSETS || ++ latency_type == TIMERANDWAKEUP_LATENCY) { ++ strncpy(mp->comm, p->comm, sizeof(mp->comm)); ++ strncpy(mp->current_comm, current->comm, ++ sizeof(mp->current_comm)); ++ mp->pid = task_pid_nr(p); ++ mp->current_pid = task_pid_nr(current); ++ mp->prio = p->prio; ++ mp->current_prio = current->prio; ++ mp->latency = latency; ++ mp->timeroffset = timeroffset; ++ mp->timestamp = stop; ++ } ++#endif ++ my_hist->max_lat = latency; ++ } ++ if (unlikely(latency < my_hist->min_lat)) ++ my_hist->min_lat = latency; ++ my_hist->total_samples++; ++ my_hist->accumulate_lat += latency; ++} ++ ++static void *l_start(struct seq_file *m, loff_t *pos) ++{ ++ loff_t *index_ptr = NULL; ++ loff_t index = *pos; ++ struct hist_data *my_hist = m->private; ++ ++ if (index == 0) { ++ char minstr[32], avgstr[32], maxstr[32]; ++ ++ atomic_dec(&my_hist->hist_mode); ++ ++ if (likely(my_hist->total_samples)) { ++ long avg = (long) div64_s64(my_hist->accumulate_lat, ++ my_hist->total_samples); ++ snprintf(minstr, sizeof(minstr), "%ld", ++ my_hist->min_lat - my_hist->offset); ++ snprintf(avgstr, sizeof(avgstr), "%ld", ++ avg - my_hist->offset); ++ snprintf(maxstr, sizeof(maxstr), "%ld", ++ my_hist->max_lat - my_hist->offset); ++ } else { ++ strcpy(minstr, "<undef>"); ++ strcpy(avgstr, minstr); ++ strcpy(maxstr, minstr); ++ } ++ ++ seq_printf(m, "#Minimum latency: %s microseconds\n" ++ "#Average latency: %s microseconds\n" ++ "#Maximum latency: %s microseconds\n" ++ "#Total samples: %llu\n" ++ "#There are %llu samples lower than %ld" ++ " microseconds.\n" ++ "#There are %llu samples greater or equal" ++ " than %ld microseconds.\n" ++ "#usecs\t%16s\n", ++ minstr, avgstr, maxstr, ++ my_hist->total_samples, ++ my_hist->below_hist_bound_samples, ++ -my_hist->offset, ++ my_hist->above_hist_bound_samples, ++ MAX_ENTRY_NUM - 
my_hist->offset, ++ "samples"); ++ } ++ if (index < MAX_ENTRY_NUM) { ++ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); ++ if (index_ptr) ++ *index_ptr = index; ++ } ++ ++ return index_ptr; ++} ++ ++static void *l_next(struct seq_file *m, void *p, loff_t *pos) ++{ ++ loff_t *index_ptr = p; ++ struct hist_data *my_hist = m->private; ++ ++ if (++*pos >= MAX_ENTRY_NUM) { ++ atomic_inc(&my_hist->hist_mode); ++ return NULL; ++ } ++ *index_ptr = *pos; ++ return index_ptr; ++} ++ ++static void l_stop(struct seq_file *m, void *p) ++{ ++ kfree(p); ++} ++ ++static int l_show(struct seq_file *m, void *p) ++{ ++ int index = *(loff_t *) p; ++ struct hist_data *my_hist = m->private; ++ ++ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset, ++ my_hist->hist_array[index]); ++ return 0; ++} ++ ++static const struct seq_operations latency_hist_seq_op = { ++ .start = l_start, ++ .next = l_next, ++ .stop = l_stop, ++ .show = l_show ++}; ++ ++static int latency_hist_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ ++ ret = seq_open(file, &latency_hist_seq_op); ++ if (!ret) { ++ struct seq_file *seq = file->private_data; ++ seq->private = inode->i_private; ++ } ++ return ret; ++} ++ ++static const struct file_operations latency_hist_fops = { ++ .open = latency_hist_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = seq_release, ++}; ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static void clear_maxlatprocdata(struct maxlatproc_data *mp) ++{ ++ mp->comm[0] = mp->current_comm[0] = '\0'; ++ mp->prio = mp->current_prio = mp->pid = mp->current_pid = ++ mp->latency = mp->timeroffset = -1; ++ mp->timestamp = 0; ++} ++#endif ++ ++static void hist_reset(struct hist_data *hist) ++{ ++ atomic_dec(&hist->hist_mode); ++ ++ memset(hist->hist_array, 0, sizeof(hist->hist_array)); ++ hist->below_hist_bound_samples = 0ULL; ++ hist->above_hist_bound_samples = 0ULL; ++ hist->min_lat = LONG_MAX; ++ hist->max_lat = LONG_MIN; ++ hist->total_samples = 0ULL; ++ hist->accumulate_lat = 0LL; ++ ++ atomic_inc(&hist->hist_mode); ++} ++ ++static ssize_t ++latency_hist_reset(struct file *file, const char __user *a, ++ size_t size, loff_t *off) ++{ ++ int cpu; ++ struct hist_data *hist = NULL; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ struct maxlatproc_data *mp = NULL; ++#endif ++ off_t latency_type = (off_t) file->private_data; ++ ++ for_each_online_cpu(cpu) { ++ ++ switch (latency_type) { ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ case PREEMPTOFF_LATENCY: ++ hist = &per_cpu(preemptoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ case IRQSOFF_LATENCY: ++ hist = &per_cpu(irqsoff_hist, cpu); ++ break; ++#endif ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ hist = &per_cpu(preemptirqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ hist = &per_cpu(wakeup_latency_hist, cpu); ++ mp = &per_cpu(wakeup_maxlatproc, cpu); ++ break; ++ case WAKEUP_LATENCY_SHAREDPRIO: ++ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ hist = &per_cpu(missed_timer_offsets, cpu); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case 
TIMERANDWAKEUP_LATENCY: ++ hist = &per_cpu(timerandwakeup_latency_hist, cpu); ++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); ++ break; ++#endif ++ } ++ ++ hist_reset(hist); ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ if (latency_type == WAKEUP_LATENCY || ++ latency_type == WAKEUP_LATENCY_SHAREDPRIO || ++ latency_type == MISSED_TIMER_OFFSETS || ++ latency_type == TIMERANDWAKEUP_LATENCY) ++ clear_maxlatprocdata(mp); ++#endif ++ } ++ ++ return size; ++} ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static ssize_t ++show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ int r; ++ unsigned long *this_pid = file->private_data; ++ ++ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++} ++ ++static ssize_t do_pid(struct file *file, const char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ unsigned long pid; ++ unsigned long *this_pid = file->private_data; ++ ++ if (cnt >= sizeof(buf)) ++ return -EINVAL; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = '\0'; ++ ++ if (kstrtoul(buf, 10, &pid)) ++ return -EINVAL; ++ ++ *this_pid = pid; ++ ++ return cnt; ++} ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static ssize_t ++show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ int r; ++ struct maxlatproc_data *mp = file->private_data; ++ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8); ++ unsigned long long t; ++ unsigned long usecs, secs; ++ char *buf; ++ ++ if (mp->pid == -1 || mp->current_pid == -1) { ++ buf = "(none)\n"; ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, ++ strlen(buf)); ++ } ++ ++ buf = kmalloc(strmaxlen, GFP_KERNEL); ++ if (buf == NULL) ++ return -ENOMEM; ++ ++ t = ns2usecs(mp->timestamp); ++ usecs = do_div(t, USEC_PER_SEC); ++ secs = (unsigned long) t; ++ r = snprintf(buf, strmaxlen, ++ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid, ++ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm, ++ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm, ++ secs, usecs); ++ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++ kfree(buf); ++ return r; ++} ++#endif ++ ++static ssize_t ++show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ struct enable_data *ed = file->private_data; ++ int r; ++ ++ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++} ++ ++static ssize_t ++do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ long enable; ++ struct enable_data *ed = file->private_data; ++ ++ if (cnt >= sizeof(buf)) ++ return -EINVAL; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = 0; ++ ++ if (kstrtoul(buf, 10, &enable)) ++ return -EINVAL; ++ ++ if ((enable && ed->enabled) || (!enable && !ed->enabled)) ++ return cnt; ++ ++ if (enable) { ++ int ret; ++ ++ switch (ed->latency_type) { ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ ret = register_trace_preemptirqsoff_hist( ++ probe_preemptirqsoff_hist, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_preemptirqsoff_hist " ++ "to trace_preemptirqsoff_hist\n"); ++ return ret; ++ } ++ 
break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ ret = register_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_start " ++ "to trace_sched_wakeup\n"); ++ return ret; ++ } ++ ret = register_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_start " ++ "to trace_sched_wakeup_new\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ return ret; ++ } ++ ret = register_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_stop " ++ "to trace_sched_switch\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ return ret; ++ } ++ ret = register_trace_sched_migrate_task( ++ probe_sched_migrate_task, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_sched_migrate_task " ++ "to trace_sched_migrate_task\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ return ret; ++ } ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ ret = register_trace_hrtimer_interrupt( ++ probe_hrtimer_interrupt, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_hrtimer_interrupt " ++ "to trace_hrtimer_interrupt\n"); ++ return ret; ++ } ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ if (!wakeup_latency_enabled_data.enabled || ++ !missed_timer_offsets_enabled_data.enabled) ++ return -EINVAL; ++ break; ++#endif ++ default: ++ break; ++ } ++ } else { ++ switch (ed->latency_type) { ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ { ++ int cpu; ++ ++ unregister_trace_preemptirqsoff_hist( ++ probe_preemptirqsoff_hist, NULL); ++ for_each_online_cpu(cpu) { ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ per_cpu(hist_irqsoff_counting, ++ cpu) = 0; ++#endif ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ per_cpu(hist_preemptoff_counting, ++ cpu) = 0; ++#endif ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ per_cpu(hist_preemptirqsoff_counting, ++ cpu) = 0; ++#endif ++ } ++ } ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ { ++ int cpu; ++ ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ unregister_trace_sched_migrate_task( ++ probe_sched_migrate_task, NULL); ++ ++ for_each_online_cpu(cpu) { ++ per_cpu(wakeup_task, cpu) = NULL; ++ per_cpu(wakeup_sharedprio, cpu) = 0; ++ } ++ } ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ timerandwakeup_enabled_data.enabled = 0; ++#endif ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ unregister_trace_hrtimer_interrupt( ++ probe_hrtimer_interrupt, NULL); ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ timerandwakeup_enabled_data.enabled = 
0; ++#endif ++ break; ++#endif ++ default: ++ break; ++ } ++ } ++ ed->enabled = enable; ++ return cnt; ++} ++ ++static const struct file_operations latency_hist_reset_fops = { ++ .open = tracing_open_generic, ++ .write = latency_hist_reset, ++}; ++ ++static const struct file_operations enable_fops = { ++ .open = tracing_open_generic, ++ .read = show_enable, ++ .write = do_enable, ++}; ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static const struct file_operations pid_fops = { ++ .open = tracing_open_generic, ++ .read = show_pid, ++ .write = do_pid, ++}; ++ ++static const struct file_operations maxlatproc_fops = { ++ .open = tracing_open_generic, ++ .read = show_maxlatproc, ++}; ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++static notrace void probe_preemptirqsoff_hist(void *v, int reason, ++ int starthist) ++{ ++ int cpu = raw_smp_processor_id(); ++ int time_set = 0; ++ ++ if (starthist) { ++ cycle_t uninitialized_var(start); ++ ++ if (!preempt_count() && !irqs_disabled()) ++ return; ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ if ((reason == IRQS_OFF || reason == TRACE_START) && ++ !per_cpu(hist_irqsoff_counting, cpu)) { ++ per_cpu(hist_irqsoff_counting, cpu) = 1; ++ start = ftrace_now(cpu); ++ time_set++; ++ per_cpu(hist_irqsoff_start, cpu) = start; ++ } ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ if ((reason == PREEMPT_OFF || reason == TRACE_START) && ++ !per_cpu(hist_preemptoff_counting, cpu)) { ++ per_cpu(hist_preemptoff_counting, cpu) = 1; ++ if (!(time_set++)) ++ start = ftrace_now(cpu); ++ per_cpu(hist_preemptoff_start, cpu) = start; ++ } ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ if (per_cpu(hist_irqsoff_counting, cpu) && ++ per_cpu(hist_preemptoff_counting, cpu) && ++ !per_cpu(hist_preemptirqsoff_counting, cpu)) { ++ per_cpu(hist_preemptirqsoff_counting, cpu) = 1; ++ if (!time_set) ++ start = ftrace_now(cpu); ++ per_cpu(hist_preemptirqsoff_start, cpu) = start; ++ } ++#endif ++ } else { ++ cycle_t uninitialized_var(stop); ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ if ((reason == IRQS_ON || reason == TRACE_STOP) && ++ per_cpu(hist_irqsoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_irqsoff_start, cpu); ++ ++ stop = ftrace_now(cpu); ++ time_set++; ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0, ++ stop, NULL); ++ } ++ per_cpu(hist_irqsoff_counting, cpu) = 0; ++ } ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ if ((reason == PREEMPT_ON || reason == TRACE_STOP) && ++ per_cpu(hist_preemptoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_preemptoff_start, cpu); ++ ++ if (!(time_set++)) ++ stop = ftrace_now(cpu); ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(PREEMPTOFF_LATENCY, cpu, latency, ++ 0, stop, NULL); ++ } ++ per_cpu(hist_preemptoff_counting, cpu) = 0; ++ } ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ if ((!per_cpu(hist_irqsoff_counting, cpu) || ++ !per_cpu(hist_preemptoff_counting, cpu)) && ++ per_cpu(hist_preemptirqsoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); ++ ++ if (!time_set) ++ stop = ftrace_now(cpu); ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, ++ latency, 0, stop, NULL); ++ } ++ per_cpu(hist_preemptirqsoff_counting, 
cpu) = 0; ++ } ++#endif ++ } ++} ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++static DEFINE_RAW_SPINLOCK(wakeup_lock); ++static notrace void probe_sched_migrate_task(void *v, struct task_struct *task, ++ int cpu) ++{ ++ int old_cpu = task_cpu(task); ++ ++ if (cpu != old_cpu) { ++ unsigned long flags; ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); ++ if (task == cpu_wakeup_task) { ++ put_task_struct(cpu_wakeup_task); ++ per_cpu(wakeup_task, old_cpu) = NULL; ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; ++ get_task_struct(cpu_wakeup_task); ++ } ++ ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++ } ++} ++ ++static notrace void probe_wakeup_latency_hist_start(void *v, ++ struct task_struct *p) ++{ ++ unsigned long flags; ++ struct task_struct *curr = current; ++ int cpu = task_cpu(p); ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu); ++ ++ if (wakeup_pid) { ++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || ++ p->prio == curr->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ if (likely(wakeup_pid != task_pid_nr(p))) ++ goto out; ++ } else { ++ if (likely(!rt_task(p)) || ++ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || ++ p->prio > curr->prio) ++ goto out; ++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || ++ p->prio == curr->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ } ++ ++ if (cpu_wakeup_task) ++ put_task_struct(cpu_wakeup_task); ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; ++ get_task_struct(cpu_wakeup_task); ++ cpu_wakeup_task->preempt_timestamp_hist = ++ ftrace_now(raw_smp_processor_id()); ++out: ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++} ++ ++static notrace void probe_wakeup_latency_hist_stop(void *v, ++ bool preempt, struct task_struct *prev, struct task_struct *next) ++{ ++ unsigned long flags; ++ int cpu = task_cpu(next); ++ long latency; ++ cycle_t stop; ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu); ++ ++ if (cpu_wakeup_task == NULL) ++ goto out; ++ ++ /* Already running? */ ++ if (unlikely(current == cpu_wakeup_task)) ++ goto out_reset; ++ ++ if (next != cpu_wakeup_task) { ++ if (next->prio < cpu_wakeup_task->prio) ++ goto out_reset; ++ ++ if (next->prio == cpu_wakeup_task->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ ++ goto out; ++ } ++ ++ if (current->prio == cpu_wakeup_task->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ ++ /* ++ * The task we are waiting for is about to be switched to. ++ * Calculate latency and store it in histogram. 
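The probe pair above is the kernel-side form of a generic measurement pattern: stamp at wakeup, subtract at switch-in. A runnable userspace analogue (hypothetical demo, not part of the patch; compile with -pthread), where the condvar signal plays the role of sched_wakeup and the waiter's return from pthread_cond_wait() plays the role of sched_switch:

	#include <pthread.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static struct timespec wake_stamp;	/* cf. preempt_timestamp_hist */
	static int woken;

	static long usecs_since(const struct timespec *t0)
	{
		struct timespec t1;

		clock_gettime(CLOCK_MONOTONIC, &t1);
		return (t1.tv_sec - t0->tv_sec) * 1000000L +
		       (t1.tv_nsec - t0->tv_nsec) / 1000L;
	}

	static void *waiter(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		while (!woken)
			pthread_cond_wait(&cond, &lock); /* blocks, then "switches in" */
		printf("wakeup-to-run latency: %ld us\n", usecs_since(&wake_stamp));
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waiter, NULL);
		sleep(1);				/* let the waiter block */
		pthread_mutex_lock(&lock);
		clock_gettime(CLOCK_MONOTONIC, &wake_stamp);	/* "hist_start" */
		woken = 1;
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
		pthread_join(t, NULL);
		return 0;
	}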
++ */ ++ stop = ftrace_now(raw_smp_processor_id()); ++ ++ latency = ((long) (stop - next->preempt_timestamp_hist)) / ++ NSECS_PER_USECS; ++ ++ if (per_cpu(wakeup_sharedprio, cpu)) { ++ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop, ++ next); ++ per_cpu(wakeup_sharedprio, cpu) = 0; ++ } else { ++ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next); ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ if (timerandwakeup_enabled_data.enabled) { ++ latency_hist(TIMERANDWAKEUP_LATENCY, cpu, ++ next->timer_offset + latency, next->timer_offset, ++ stop, next); ++ } ++#endif ++ } ++ ++out_reset: ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ next->timer_offset = 0; ++#endif ++ put_task_struct(cpu_wakeup_task); ++ per_cpu(wakeup_task, cpu) = NULL; ++out: ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++} ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++static notrace void probe_hrtimer_interrupt(void *v, int cpu, ++ long long latency_ns, struct task_struct *curr, ++ struct task_struct *task) ++{ ++ if (latency_ns <= 0 && task != NULL && rt_task(task) && ++ (task->prio < curr->prio || ++ (task->prio == curr->prio && ++ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) { ++ long latency; ++ cycle_t now; ++ ++ if (missed_timer_offsets_pid) { ++ if (likely(missed_timer_offsets_pid != ++ task_pid_nr(task))) ++ return; ++ } ++ ++ now = ftrace_now(cpu); ++ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS); ++ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now, ++ task); ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ task->timer_offset = latency; ++#endif ++ } ++} ++#endif ++ ++static __init int latency_hist_init(void) ++{ ++ struct dentry *latency_hist_root = NULL; ++ struct dentry *dentry; ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ struct dentry *dentry_sharedprio; ++#endif ++ struct dentry *entry; ++ struct dentry *enable_root; ++ int i = 0; ++ struct hist_data *my_hist; ++ char name[64]; ++ char *cpufmt = "CPU%d"; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ char *cpufmt_maxlatproc = "max_latency-CPU%d"; ++ struct maxlatproc_data *mp = NULL; ++#endif ++ ++ dentry = tracing_init_dentry(); ++ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry); ++ enable_root = debugfs_create_dir("enable", latency_hist_root); ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(irqsoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(irqsoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ dentry = debugfs_create_dir(preemptoff_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(preemptoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(preemptoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ dentry = debugfs_create_dir(preemptirqsoff_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, 
cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(preemptirqsoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ entry = debugfs_create_file("preemptirqsoff", 0644, ++ enable_root, (void *)&preemptirqsoff_enabled_data, ++ &enable_fops); ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ dentry = debugfs_create_dir(wakeup_latency_hist_dir, ++ latency_hist_root); ++ dentry_sharedprio = debugfs_create_dir( ++ wakeup_latency_hist_dir_sharedprio, dentry); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(wakeup_latency_hist, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(wakeup_latency_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, ++ &per_cpu(wakeup_latency_hist_sharedprio, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ ++ mp = &per_cpu(wakeup_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); ++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("pid", 0644, dentry, ++ (void *)&wakeup_pid, &pid_fops); ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); ++ entry = debugfs_create_file("reset", 0644, dentry_sharedprio, ++ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); ++ entry = debugfs_create_file("wakeup", 0644, ++ enable_root, (void *)&wakeup_latency_enabled_data, ++ &enable_fops); ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ dentry = debugfs_create_dir(missed_timer_offsets_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(missed_timer_offsets, i), &latency_hist_fops); ++ my_hist = &per_cpu(missed_timer_offsets, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("pid", 0644, dentry, ++ (void *)&missed_timer_offsets_pid, &pid_fops); ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops); ++ entry = debugfs_create_file("missed_timer_offsets", 0644, ++ enable_root, (void *)&missed_timer_offsets_enabled_data, ++ &enable_fops); ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(timerandwakeup_latency_hist, i), ++ &latency_hist_fops); ++ my_hist = 
&per_cpu(timerandwakeup_latency_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ mp = &per_cpu(timerandwakeup_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops); ++ entry = debugfs_create_file("timerandwakeup", 0644, ++ enable_root, (void *)&timerandwakeup_enabled_data, ++ &enable_fops); ++#endif ++ return 0; ++} ++ ++device_initcall(latency_hist_init); +diff -Nur linux-4.9.28.orig/kernel/trace/Makefile linux-4.9.28/kernel/trace/Makefile +--- linux-4.9.28.orig/kernel/trace/Makefile 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/trace/Makefile 2017-05-19 03:37:25.202177573 +0200 +@@ -38,6 +38,10 @@ + obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o + obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o + obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o ++obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o ++obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o ++obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o ++obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o + obj-$(CONFIG_NOP_TRACER) += trace_nop.o + obj-$(CONFIG_STACK_TRACER) += trace_stack.o + obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o +diff -Nur linux-4.9.28.orig/kernel/trace/trace.c linux-4.9.28/kernel/trace/trace.c +--- linux-4.9.28.orig/kernel/trace/trace.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/trace/trace.c 2017-05-19 03:37:25.202177573 +0200 +@@ -1897,6 +1897,7 @@ + struct task_struct *tsk = current; + + entry->preempt_count = pc & 0xff; ++ entry->preempt_lazy_count = preempt_lazy_count(); + entry->pid = (tsk) ? tsk->pid : 0; + entry->flags = + #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +@@ -1907,8 +1908,11 @@ + ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | + ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | + ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | +- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | ++ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | ++ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) | + (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); ++ ++ entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; + } + EXPORT_SYMBOL_GPL(tracing_generic_entry_update); + +@@ -2892,14 +2896,17 @@ + + static void print_lat_help_header(struct seq_file *m) + { +- seq_puts(m, "# _------=> CPU# \n" +- "# / _-----=> irqs-off \n" +- "# | / _----=> need-resched \n" +- "# || / _---=> hardirq/softirq \n" +- "# ||| / _--=> preempt-depth \n" +- "# |||| / delay \n" +- "# cmd pid ||||| time | caller \n" +- "# \\ / ||||| \\ | / \n"); ++ seq_puts(m, "# _--------=> CPU# \n" ++ "# / _-------=> irqs-off \n" ++ "# | / _------=> need-resched \n" ++ "# || / _-----=> need-resched_lazy \n" ++ "# ||| / _----=> hardirq/softirq \n" ++ "# |||| / _---=> preempt-depth \n" ++ "# ||||| / _--=> preempt-lazy-depth\n" ++ "# |||||| / _-=> migrate-disable \n" ++ "# ||||||| / delay \n" ++ "# cmd pid |||||||| time | caller \n" ++ "# \\ / |||||||| \\ | / \n"); + } + + static void print_event_info(struct trace_buffer *buf, struct seq_file *m) +@@ -2925,11 +2932,14 @@ + print_event_info(buf, m); + seq_puts(m, "# _-----=> irqs-off\n" + "# / _----=> need-resched\n" +- "# | / _---=> hardirq/softirq\n" +- "# || / _--=> preempt-depth\n" +- "# ||| / delay\n" +- "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" +- "# | | | |||| | |\n"); ++ "# |/ _-----=> need-resched_lazy\n" ++ "# || / _---=> hardirq/softirq\n" ++ "# ||| / _--=> preempt-depth\n" ++ "# |||| / _-=> preempt-lazy-depth\n" ++ "# ||||| / _-=> migrate-disable \n" ++ "# |||||| / delay\n" ++ "# TASK-PID CPU# ||||||| TIMESTAMP FUNCTION\n" ++ "# | | | ||||||| | |\n"); + } + + void +diff -Nur linux-4.9.28.orig/kernel/trace/trace_events.c linux-4.9.28/kernel/trace/trace_events.c +--- linux-4.9.28.orig/kernel/trace/trace_events.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/trace/trace_events.c 2017-05-19 03:37:25.202177573 +0200 +@@ -187,6 +187,8 @@ + __common_field(unsigned char, flags); + __common_field(unsigned char, preempt_count); + __common_field(int, pid); ++ __common_field(unsigned short, migrate_disable); ++ __common_field(unsigned short, padding); + + return ret; + } +diff -Nur linux-4.9.28.orig/kernel/trace/trace.h linux-4.9.28/kernel/trace/trace.h +--- linux-4.9.28.orig/kernel/trace/trace.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/trace/trace.h 2017-05-19 03:37:25.202177573 +0200 +@@ -124,6 +124,7 @@ + * NEED_RESCHED - reschedule is requested + * HARDIRQ - inside an interrupt handler + * SOFTIRQ - inside a softirq handler ++ * NEED_RESCHED_LAZY - lazy reschedule is requested + */ + enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 0x01, +@@ -133,6 +134,7 @@ + TRACE_FLAG_SOFTIRQ = 0x10, + TRACE_FLAG_PREEMPT_RESCHED = 0x20, + TRACE_FLAG_NMI = 0x40, ++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80, + }; + + #define TRACE_BUF_SIZE 1024 +diff -Nur linux-4.9.28.orig/kernel/trace/trace_irqsoff.c linux-4.9.28/kernel/trace/trace_irqsoff.c +--- linux-4.9.28.orig/kernel/trace/trace_irqsoff.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/trace/trace_irqsoff.c 2017-05-19 03:37:25.202177573 +0200 +@@ -13,6 +13,7 @@ + #include <linux/uaccess.h> + #include <linux/module.h> + #include <linux/ftrace.h> ++#include <trace/events/hist.h> + + #include "trace.h" + +@@ -424,11 +425,13 @@ + { + if (preempt_trace() || irq_trace()) + start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); ++ trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1); + } + EXPORT_SYMBOL_GPL(start_critical_timings); + + void stop_critical_timings(void) + { ++ trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0); + if (preempt_trace() || irq_trace()) + 
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + } +@@ -438,6 +441,7 @@ + #ifdef CONFIG_PROVE_LOCKING + void time_hardirqs_on(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(a0, a1); + } +@@ -446,6 +450,7 @@ + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(a0, a1); ++ trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1); + } + + #else /* !CONFIG_PROVE_LOCKING */ +@@ -471,6 +476,7 @@ + */ + void trace_hardirqs_on(void) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + } +@@ -480,11 +486,13 @@ + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + EXPORT_SYMBOL(trace_hardirqs_off); + + __visible void trace_hardirqs_on_caller(unsigned long caller_addr) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(CALLER_ADDR0, caller_addr); + } +@@ -494,6 +502,7 @@ + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(CALLER_ADDR0, caller_addr); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + EXPORT_SYMBOL(trace_hardirqs_off_caller); + +@@ -503,12 +512,14 @@ + #ifdef CONFIG_PREEMPT_TRACER + void trace_preempt_on(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(PREEMPT_ON, 0); + if (preempt_trace() && !irq_trace()) + stop_critical_timing(a0, a1); + } + + void trace_preempt_off(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(PREEMPT_ON, 1); + if (preempt_trace() && !irq_trace()) + start_critical_timing(a0, a1); + } +diff -Nur linux-4.9.28.orig/kernel/trace/trace_output.c linux-4.9.28/kernel/trace/trace_output.c +--- linux-4.9.28.orig/kernel/trace/trace_output.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/trace/trace_output.c 2017-05-19 03:37:25.202177573 +0200 +@@ -386,6 +386,7 @@ + { + char hardsoft_irq; + char need_resched; ++ char need_resched_lazy; + char irqs_off; + int hardirq; + int softirq; +@@ -416,6 +417,9 @@ + break; + } + ++ need_resched_lazy = ++ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; ++ + hardsoft_irq = + (nmi && hardirq) ? 'Z' : + nmi ? 'z' : +@@ -424,14 +428,25 @@ + softirq ? 's' : + '.' 
; + +- trace_seq_printf(s, "%c%c%c", +- irqs_off, need_resched, hardsoft_irq); ++ trace_seq_printf(s, "%c%c%c%c", ++ irqs_off, need_resched, need_resched_lazy, ++ hardsoft_irq); + + if (entry->preempt_count) + trace_seq_printf(s, "%x", entry->preempt_count); + else + trace_seq_putc(s, '.'); + ++ if (entry->preempt_lazy_count) ++ trace_seq_printf(s, "%x", entry->preempt_lazy_count); ++ else ++ trace_seq_putc(s, '.'); ++ ++ if (entry->migrate_disable) ++ trace_seq_printf(s, "%x", entry->migrate_disable); ++ else ++ trace_seq_putc(s, '.'); ++ + return !trace_seq_has_overflowed(s); + } + +diff -Nur linux-4.9.28.orig/kernel/user.c linux-4.9.28/kernel/user.c +--- linux-4.9.28.orig/kernel/user.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/user.c 2017-05-19 03:37:25.202177573 +0200 +@@ -161,11 +161,11 @@ + if (!up) + return; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) + free_user(up, flags); + else +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + struct user_struct *alloc_uid(kuid_t uid) +diff -Nur linux-4.9.28.orig/kernel/watchdog.c linux-4.9.28/kernel/watchdog.c +--- linux-4.9.28.orig/kernel/watchdog.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/watchdog.c 2017-05-19 03:37:25.202177573 +0200 +@@ -315,6 +315,8 @@ + + #ifdef CONFIG_HARDLOCKUP_DETECTOR + ++static DEFINE_RAW_SPINLOCK(watchdog_output_lock); ++ + static struct perf_event_attr wd_hw_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, +@@ -348,6 +350,13 @@ + /* only print hardlockups once */ + if (__this_cpu_read(hard_watchdog_warn) == true) + return; ++ /* ++ * If early-printk is enabled then make sure we do not ++ * lock up in printk() and kill console logging: ++ */ ++ printk_kill(); ++ ++ raw_spin_lock(&watchdog_output_lock); + + pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); + print_modules(); +@@ -365,6 +374,7 @@ + !test_and_set_bit(0, &hardlockup_allcpu_dumped)) + trigger_allbutself_cpu_backtrace(); + ++ raw_spin_unlock(&watchdog_output_lock); + if (hardlockup_panic) + nmi_panic(regs, "Hard LOCKUP"); + +@@ -512,6 +522,7 @@ + /* kick off the timer for the hardlockup detector */ + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer->function = watchdog_timer_fn; ++ hrtimer->irqsafe = 1; + + /* Enable the perf event */ + watchdog_nmi_enable(cpu); +diff -Nur linux-4.9.28.orig/kernel/workqueue.c linux-4.9.28/kernel/workqueue.c +--- linux-4.9.28.orig/kernel/workqueue.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/workqueue.c 2017-05-19 03:37:25.206177727 +0200 +@@ -48,6 +48,8 @@ + #include <linux/nodemask.h> + #include <linux/moduleparam.h> + #include <linux/uaccess.h> ++#include <linux/locallock.h> ++#include <linux/delay.h> + + #include "workqueue_internal.h" + +@@ -121,11 +123,16 @@ + * cpu or grabbing pool->lock is enough for read access. If + * POOL_DISASSOCIATED is set, it's identical to L. + * ++ * On RT we need the extra protection via rt_lock_idle_list() for ++ * the list manipulations against read access from ++ * wq_worker_sleeping(). All other places are nicely serialized via ++ * pool->lock. ++ * + * A: pool->attach_mutex protected. + * + * PL: wq_pool_mutex protected. + * +- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. ++ * PR: wq_pool_mutex protected for writes. RCU protected for reads. + * + * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. 
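The local_irq_save_nort()/local_irq_restore_nort() pair substituted into kernel/user.c above is defined by an irqflags.h hunk elsewhere in this patch (not shown here). Its shape is roughly the following sketch; the point is that on PREEMPT_RT_FULL the uidhash_lock has become a sleeping lock, so hard interrupt disabling around it is neither needed nor allowed:

	/* rough shape only -- the authoritative definitions live in the
	 * irqflags.h hunk elsewhere in this patch */
	#ifdef CONFIG_PREEMPT_RT_FULL
	# define local_irq_save_nort(flags)	local_save_flags(flags)
	# define local_irq_restore_nort(flags)	(void)(flags)
	#else
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	#endif

The watchdog hunk that follows goes the other way: hardlockup reporting runs in NMI context, so it gains a raw_spinlock_t, the one lock type that still spins with interrupts off on RT.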
+ * +@@ -134,7 +141,7 @@ + * + * WQ: wq->mutex protected. + * +- * WR: wq->mutex protected for writes. Sched-RCU protected for reads. ++ * WR: wq->mutex protected for writes. RCU protected for reads. + * + * MD: wq_mayday_lock protected. + */ +@@ -185,7 +192,7 @@ + atomic_t nr_running ____cacheline_aligned_in_smp; + + /* +- * Destruction of pool is sched-RCU protected to allow dereferences ++ * Destruction of pool is RCU protected to allow dereferences + * from get_work_pool(). + */ + struct rcu_head rcu; +@@ -214,7 +221,7 @@ + /* + * Release of unbound pwq is punted to system_wq. See put_pwq() + * and pwq_unbound_release_workfn() for details. pool_workqueue +- * itself is also sched-RCU protected so that the first pwq can be ++ * itself is also RCU protected so that the first pwq can be + * determined without grabbing wq->mutex. + */ + struct work_struct unbound_release_work; +@@ -348,6 +355,8 @@ + struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; + EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); + ++static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); ++ + static int worker_thread(void *__worker); + static void workqueue_sysfs_unregister(struct workqueue_struct *wq); + +@@ -355,20 +364,20 @@ + #include <trace/events/workqueue.h> + + #define assert_rcu_or_pool_mutex() \ +- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&wq_pool_mutex), \ +- "sched RCU or wq_pool_mutex should be held") ++ "RCU or wq_pool_mutex should be held") + + #define assert_rcu_or_wq_mutex(wq) \ +- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&wq->mutex), \ +- "sched RCU or wq->mutex should be held") ++ "RCU or wq->mutex should be held") + + #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ +- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&wq->mutex) && \ + !lockdep_is_held(&wq_pool_mutex), \ +- "sched RCU, wq->mutex or wq_pool_mutex should be held") ++ "RCU, wq->mutex or wq_pool_mutex should be held") + + #define for_each_cpu_worker_pool(pool, cpu) \ + for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ +@@ -380,7 +389,7 @@ + * @pool: iteration cursor + * @pi: integer used for iteration + * +- * This must be called either with wq_pool_mutex held or sched RCU read ++ * This must be called either with wq_pool_mutex held or RCU read + * locked. If the pool needs to be used beyond the locking in effect, the + * caller is responsible for guaranteeing that the pool stays online. + * +@@ -412,7 +421,7 @@ + * @pwq: iteration cursor + * @wq: the target workqueue + * +- * This must be called either with wq->mutex held or sched RCU read locked. ++ * This must be called either with wq->mutex held or RCU read locked. + * If the pwq needs to be used beyond the locking in effect, the caller is + * responsible for guaranteeing that the pwq stays online. 
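All of the sched-RCU-to-plain-RCU rewording above has one practical driver: rcu_read_lock_sched() implies preempt_disable(), and a preemption-disabled region must not take pool->lock once that lock is a sleeping rtmutex on RT. An illustrative before/after fragment (kernel context, not standalone; get_work_pool() per its own comment requires only an RCU read section):

	static void peek_pool_old(struct work_struct *work)
	{
		struct worker_pool *pool;

		rcu_read_lock_sched();		/* preemption off */
		pool = get_work_pool(work);
		/* spin_lock_irq(&pool->lock) here would might_sleep() on RT */
		rcu_read_unlock_sched();
	}

	static void peek_pool_new(struct work_struct *work)
	{
		struct worker_pool *pool;

		rcu_read_lock();		/* preemptible read section */
		pool = get_work_pool(work);
		if (pool) {
			spin_lock_irq(&pool->lock);	/* legal on RT */
			/* ... inspect pool ... */
			spin_unlock_irq(&pool->lock);
		}
		rcu_read_unlock();
	}

The cost is that the free side must now wait for plain RCU grace periods, which is why the call_rcu_sched() sites later in this file become call_rcu().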
+ * +@@ -424,6 +433,31 @@ + if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ + else + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static inline void rt_lock_idle_list(struct worker_pool *pool) ++{ ++ preempt_disable(); ++} ++static inline void rt_unlock_idle_list(struct worker_pool *pool) ++{ ++ preempt_enable(); ++} ++static inline void sched_lock_idle_list(struct worker_pool *pool) { } ++static inline void sched_unlock_idle_list(struct worker_pool *pool) { } ++#else ++static inline void rt_lock_idle_list(struct worker_pool *pool) { } ++static inline void rt_unlock_idle_list(struct worker_pool *pool) { } ++static inline void sched_lock_idle_list(struct worker_pool *pool) ++{ ++ spin_lock_irq(&pool->lock); ++} ++static inline void sched_unlock_idle_list(struct worker_pool *pool) ++{ ++ spin_unlock_irq(&pool->lock); ++} ++#endif ++ ++ + #ifdef CONFIG_DEBUG_OBJECTS_WORK + + static struct debug_obj_descr work_debug_descr; +@@ -548,7 +582,7 @@ + * @wq: the target workqueue + * @node: the node ID + * +- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU ++ * This must be called with any of wq_pool_mutex, wq->mutex or RCU + * read locked. + * If the pwq needs to be used beyond the locking in effect, the caller is + * responsible for guaranteeing that the pwq stays online. +@@ -692,8 +726,8 @@ + * @work: the work item of interest + * + * Pools are created and destroyed under wq_pool_mutex, and allows read +- * access under sched-RCU read lock. As such, this function should be +- * called under wq_pool_mutex or with preemption disabled. ++ * access under RCU read lock. As such, this function should be ++ * called under wq_pool_mutex or inside of a rcu_read_lock() region. + * + * All fields of the returned pool are accessible as long as the above + * mentioned locking is in effect. If the returned pool needs to be used +@@ -830,50 +864,45 @@ + */ + static void wake_up_worker(struct worker_pool *pool) + { +- struct worker *worker = first_idle_worker(pool); ++ struct worker *worker; ++ ++ rt_lock_idle_list(pool); ++ ++ worker = first_idle_worker(pool); + + if (likely(worker)) + wake_up_process(worker->task); ++ ++ rt_unlock_idle_list(pool); + } + + /** +- * wq_worker_waking_up - a worker is waking up ++ * wq_worker_running - a worker is running again + * @task: task waking up +- * @cpu: CPU @task is waking up to +- * +- * This function is called during try_to_wake_up() when a worker is +- * being awoken. + * +- * CONTEXT: +- * spin_lock_irq(rq->lock) ++ * This function is called when a worker returns from schedule() + */ +-void wq_worker_waking_up(struct task_struct *task, int cpu) ++void wq_worker_running(struct task_struct *task) + { + struct worker *worker = kthread_data(task); + +- if (!(worker->flags & WORKER_NOT_RUNNING)) { +- WARN_ON_ONCE(worker->pool->cpu != cpu); ++ if (!worker->sleeping) ++ return; ++ if (!(worker->flags & WORKER_NOT_RUNNING)) + atomic_inc(&worker->pool->nr_running); +- } ++ worker->sleeping = 0; + } + + /** + * wq_worker_sleeping - a worker is going to sleep + * @task: task going to sleep + * +- * This function is called during schedule() when a busy worker is +- * going to sleep. Worker on the same cpu can be woken up by +- * returning pointer to its task. +- * +- * CONTEXT: +- * spin_lock_irq(rq->lock) +- * +- * Return: +- * Worker task on @cpu to wake up, %NULL if none. ++ * This function is called from schedule() when a busy worker is ++ * going to sleep. 
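wq_worker_running()/wq_worker_sleeping() replace the old rq-lock-time hooks; the scheduler side that drives them lives in a separate sched/core.c hunk of this patch. Roughly, the intended call sites look like this sketch (simplified; the real hunk also filters on tsk->state and PI blocking):

	static inline void sched_submit_work(struct task_struct *tsk)
	{
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_sleeping(tsk);	/* may take pool->lock now */
	}

	static inline void sched_update_worker(struct task_struct *tsk)
	{
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_running(tsk);		/* clears worker->sleeping */
	}

Because both hooks now run in preemptible context rather than under rq->lock, wq_worker_sleeping() is free to take pool->lock to wake an idle worker, and the per-worker sleeping flag guards against the pair nesting.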
+ */ +-struct task_struct *wq_worker_sleeping(struct task_struct *task) ++void wq_worker_sleeping(struct task_struct *task) + { +- struct worker *worker = kthread_data(task), *to_wakeup = NULL; ++ struct worker *worker = kthread_data(task); + struct worker_pool *pool; + + /* +@@ -882,29 +911,26 @@ + * checking NOT_RUNNING. + */ + if (worker->flags & WORKER_NOT_RUNNING) +- return NULL; ++ return; + + pool = worker->pool; + +- /* this can only happen on the local cpu */ +- if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id())) +- return NULL; ++ if (WARN_ON_ONCE(worker->sleeping)) ++ return; ++ ++ worker->sleeping = 1; + + /* + * The counterpart of the following dec_and_test, implied mb, + * worklist not empty test sequence is in insert_work(). + * Please read comment there. +- * +- * NOT_RUNNING is clear. This means that we're bound to and +- * running on the local cpu w/ rq lock held and preemption +- * disabled, which in turn means that none else could be +- * manipulating idle_list, so dereferencing idle_list without pool +- * lock is safe. + */ + if (atomic_dec_and_test(&pool->nr_running) && +- !list_empty(&pool->worklist)) +- to_wakeup = first_idle_worker(pool); +- return to_wakeup ? to_wakeup->task : NULL; ++ !list_empty(&pool->worklist)) { ++ sched_lock_idle_list(pool); ++ wake_up_worker(pool); ++ sched_unlock_idle_list(pool); ++ } + } + + /** +@@ -1098,12 +1124,14 @@ + { + if (pwq) { + /* +- * As both pwqs and pools are sched-RCU protected, the ++ * As both pwqs and pools are RCU protected, the + * following lock operations are safe. + */ +- spin_lock_irq(&pwq->pool->lock); ++ rcu_read_lock(); ++ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); + put_pwq(pwq); +- spin_unlock_irq(&pwq->pool->lock); ++ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); ++ rcu_read_unlock(); + } + } + +@@ -1207,7 +1235,7 @@ + struct worker_pool *pool; + struct pool_workqueue *pwq; + +- local_irq_save(*flags); ++ local_lock_irqsave(pendingb_lock, *flags); + + /* try to steal the timer if it exists */ + if (is_dwork) { +@@ -1226,6 +1254,7 @@ + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) + return 0; + ++ rcu_read_lock(); + /* + * The queueing is in progress, or it is already queued. Try to + * steal it from ->worklist without clearing WORK_STRUCT_PENDING. +@@ -1264,14 +1293,16 @@ + set_work_pool_and_keep_pending(work, pool->id); + + spin_unlock(&pool->lock); ++ rcu_read_unlock(); + return 1; + } + spin_unlock(&pool->lock); + fail: +- local_irq_restore(*flags); ++ rcu_read_unlock(); ++ local_unlock_irqrestore(pendingb_lock, *flags); + if (work_is_canceling(work)) + return -ENOENT; +- cpu_relax(); ++ cpu_chill(); + return -EAGAIN; + } + +@@ -1373,7 +1404,7 @@ + * queued or lose PENDING. Grabbing PENDING and queueing should + * happen with IRQ disabled. 
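The cpu_relax()-to-cpu_chill() swap in the retry path above addresses an RT-specific livelock: if the task holding PENDING was preempted on this CPU, busy-spinning never lets it run. The series defines cpu_chill() elsewhere; its conventional shape is a real, short sleep, roughly as follows (simplified sketch -- the actual helper also keeps the freezer from interrupting the sleep):

	#ifdef CONFIG_PREEMPT_RT_FULL
	void cpu_chill(void)
	{
		struct timespec tu = {
			.tv_nsec = NSEC_PER_MSEC,
		};

		hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	}
	#else
	# define cpu_chill()	cpu_relax()
	#endif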
+ */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + debug_work_activate(work); + +@@ -1381,6 +1412,7 @@ + if (unlikely(wq->flags & __WQ_DRAINING) && + WARN_ON_ONCE(!is_chained_work(wq))) + return; ++ rcu_read_lock(); + retry: + if (req_cpu == WORK_CPU_UNBOUND) + cpu = wq_select_unbound_cpu(raw_smp_processor_id()); +@@ -1437,10 +1469,8 @@ + /* pwq determined, queue */ + trace_workqueue_queue_work(req_cpu, pwq, work); + +- if (WARN_ON(!list_empty(&work->entry))) { +- spin_unlock(&pwq->pool->lock); +- return; +- } ++ if (WARN_ON(!list_empty(&work->entry))) ++ goto out; + + pwq->nr_in_flight[pwq->work_color]++; + work_flags = work_color_to_flags(pwq->work_color); +@@ -1458,7 +1488,9 @@ + + insert_work(pwq, work, worklist, work_flags); + ++out: + spin_unlock(&pwq->pool->lock); ++ rcu_read_unlock(); + } + + /** +@@ -1478,14 +1510,14 @@ + bool ret = false; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(pendingb_lock,flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_work(cpu, wq, work); + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(queue_work_on); +@@ -1552,14 +1584,14 @@ + unsigned long flags; + + /* read the comment in __queue_work() */ +- local_irq_save(flags); ++ local_lock_irqsave(pendingb_lock, flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_delayed_work(cpu, wq, dwork, delay); + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(queue_delayed_work_on); +@@ -1594,7 +1626,7 @@ + + if (likely(ret >= 0)) { + __queue_delayed_work(cpu, wq, dwork, delay); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + } + + /* -ENOENT from try_to_grab_pending() becomes %true */ +@@ -1627,7 +1659,9 @@ + worker->last_active = jiffies; + + /* idle_list is LIFO */ ++ rt_lock_idle_list(pool); + list_add(&worker->entry, &pool->idle_list); ++ rt_unlock_idle_list(pool); + + if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) + mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); +@@ -1660,7 +1694,9 @@ + return; + worker_clr_flags(worker, WORKER_IDLE); + pool->nr_idle--; ++ rt_lock_idle_list(pool); + list_del_init(&worker->entry); ++ rt_unlock_idle_list(pool); + } + + static struct worker *alloc_worker(int node) +@@ -1826,7 +1862,9 @@ + pool->nr_workers--; + pool->nr_idle--; + ++ rt_lock_idle_list(pool); + list_del_init(&worker->entry); ++ rt_unlock_idle_list(pool); + worker->flags |= WORKER_DIE; + wake_up_process(worker->task); + } +@@ -2785,14 +2823,14 @@ + + might_sleep(); + +- local_irq_disable(); ++ rcu_read_lock(); + pool = get_work_pool(work); + if (!pool) { +- local_irq_enable(); ++ rcu_read_unlock(); + return false; + } + +- spin_lock(&pool->lock); ++ spin_lock_irq(&pool->lock); + /* see the comment in try_to_grab_pending() with the same code */ + pwq = get_work_pwq(work); + if (pwq) { +@@ -2821,10 +2859,11 @@ + else + lock_map_acquire_read(&pwq->wq->lockdep_map); + lock_map_release(&pwq->wq->lockdep_map); +- ++ rcu_read_unlock(); + return true; + already_gone: + spin_unlock_irq(&pool->lock); ++ rcu_read_unlock(); + return false; + } + +@@ -2911,7 +2950,7 @@ + + /* tell other tasks trying to grab @work to back off */ + mark_work_canceling(work); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + + flush_work(work); + 
clear_work_data(work); +@@ -2966,10 +3005,10 @@ + */ + bool flush_delayed_work(struct delayed_work *dwork) + { +- local_irq_disable(); ++ local_lock_irq(pendingb_lock); + if (del_timer_sync(&dwork->timer)) + __queue_work(dwork->cpu, dwork->wq, &dwork->work); +- local_irq_enable(); ++ local_unlock_irq(pendingb_lock); + return flush_work(&dwork->work); + } + EXPORT_SYMBOL(flush_delayed_work); +@@ -2987,7 +3026,7 @@ + return false; + + set_work_pool_and_clear_pending(work, get_work_pool_id(work)); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + +@@ -3245,7 +3284,7 @@ + * put_unbound_pool - put a worker_pool + * @pool: worker_pool to put + * +- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU ++ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU + * safe manner. get_unbound_pool() calls this function on its failure path + * and this function should be able to release pools which went through, + * successfully or not, init_worker_pool(). +@@ -3299,8 +3338,8 @@ + del_timer_sync(&pool->idle_timer); + del_timer_sync(&pool->mayday_timer); + +- /* sched-RCU protected to allow dereferences from get_work_pool() */ +- call_rcu_sched(&pool->rcu, rcu_free_pool); ++ /* RCU protected to allow dereferences from get_work_pool() */ ++ call_rcu(&pool->rcu, rcu_free_pool); + } + + /** +@@ -3407,14 +3446,14 @@ + put_unbound_pool(pool); + mutex_unlock(&wq_pool_mutex); + +- call_rcu_sched(&pwq->rcu, rcu_free_pwq); ++ call_rcu(&pwq->rcu, rcu_free_pwq); + + /* + * If we're the last pwq going away, @wq is already dead and no one + * is gonna access it anymore. Schedule RCU free. + */ + if (is_last) +- call_rcu_sched(&wq->rcu, rcu_free_wq); ++ call_rcu(&wq->rcu, rcu_free_wq); + } + + /** +@@ -4064,7 +4103,7 @@ + * The base ref is never dropped on per-cpu pwqs. Directly + * schedule RCU free. + */ +- call_rcu_sched(&wq->rcu, rcu_free_wq); ++ call_rcu(&wq->rcu, rcu_free_wq); + } else { + /* + * We're the sole accessor of @wq at this point. Directly +@@ -4157,7 +4196,8 @@ + struct pool_workqueue *pwq; + bool ret; + +- rcu_read_lock_sched(); ++ rcu_read_lock(); ++ preempt_disable(); + + if (cpu == WORK_CPU_UNBOUND) + cpu = smp_processor_id(); +@@ -4168,7 +4208,8 @@ + pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); + + ret = !list_empty(&pwq->delayed_works); +- rcu_read_unlock_sched(); ++ preempt_enable(); ++ rcu_read_unlock(); + + return ret; + } +@@ -4194,15 +4235,15 @@ + if (work_pending(work)) + ret |= WORK_BUSY_PENDING; + +- local_irq_save(flags); ++ rcu_read_lock(); + pool = get_work_pool(work); + if (pool) { +- spin_lock(&pool->lock); ++ spin_lock_irqsave(&pool->lock, flags); + if (find_worker_executing_work(pool, work)) + ret |= WORK_BUSY_RUNNING; +- spin_unlock(&pool->lock); ++ spin_unlock_irqrestore(&pool->lock, flags); + } +- local_irq_restore(flags); ++ rcu_read_unlock(); + + return ret; + } +@@ -4391,7 +4432,7 @@ + unsigned long flags; + int pi; + +- rcu_read_lock_sched(); ++ rcu_read_lock(); + + pr_info("Showing busy workqueues and worker pools:\n"); + +@@ -4444,7 +4485,7 @@ + spin_unlock_irqrestore(&pool->lock, flags); + } + +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + } + + /* +@@ -4782,16 +4823,16 @@ + * nr_active is monotonically decreasing. It's safe + * to peek without lock. 
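The call_rcu_sched()-to-call_rcu() changes in the teardown paths above follow the standard deferred-free shape, restated generically below (kernel context; struct and function names are illustrative, not from the patch). Readers that entered rcu_read_lock() before the refcount hit zero keep a valid pointer until their read section ends:

	struct obj {
		atomic_t ref;
		struct rcu_head rcu;
	};

	static void obj_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct obj, rcu));
	}

	static void obj_put(struct obj *o)
	{
		if (atomic_dec_and_test(&o->ref))
			call_rcu(&o->rcu, obj_free_rcu); /* free after grace period */
	}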
+ */ +- rcu_read_lock_sched(); ++ rcu_read_lock(); + for_each_pwq(pwq, wq) { + WARN_ON_ONCE(pwq->nr_active < 0); + if (pwq->nr_active) { + busy = true; +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + goto out_unlock; + } + } +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + } + out_unlock: + mutex_unlock(&wq_pool_mutex); +@@ -4981,7 +5022,8 @@ + const char *delim = ""; + int node, written = 0; + +- rcu_read_lock_sched(); ++ get_online_cpus(); ++ rcu_read_lock(); + for_each_node(node) { + written += scnprintf(buf + written, PAGE_SIZE - written, + "%s%d:%d", delim, node, +@@ -4989,7 +5031,8 @@ + delim = " "; + } + written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); ++ put_online_cpus(); + + return written; + } +diff -Nur linux-4.9.28.orig/kernel/workqueue_internal.h linux-4.9.28/kernel/workqueue_internal.h +--- linux-4.9.28.orig/kernel/workqueue_internal.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/kernel/workqueue_internal.h 2017-05-19 03:37:25.206177727 +0200 +@@ -43,6 +43,7 @@ + unsigned long last_active; /* L: last active timestamp */ + unsigned int flags; /* X: flags */ + int id; /* I: worker id */ ++ int sleeping; /* None */ + + /* + * Opaque string set with work_set_desc(). Printed out with task +@@ -68,7 +69,7 @@ + * Scheduler hooks for concurrency managed workqueue. Only to be used from + * sched/core.c and workqueue.c. + */ +-void wq_worker_waking_up(struct task_struct *task, int cpu); +-struct task_struct *wq_worker_sleeping(struct task_struct *task); ++void wq_worker_running(struct task_struct *task); ++void wq_worker_sleeping(struct task_struct *task); + + #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ +diff -Nur linux-4.9.28.orig/lib/debugobjects.c linux-4.9.28/lib/debugobjects.c +--- linux-4.9.28.orig/lib/debugobjects.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/lib/debugobjects.c 2017-05-19 03:37:25.206177727 +0200 +@@ -308,7 +308,10 @@ + struct debug_obj *obj; + unsigned long flags; + +- fill_pool(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (preempt_count() == 0 && !irqs_disabled()) ++#endif ++ fill_pool(); + + db = get_bucket((unsigned long) addr); + +diff -Nur linux-4.9.28.orig/lib/idr.c linux-4.9.28/lib/idr.c +--- linux-4.9.28.orig/lib/idr.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/lib/idr.c 2017-05-19 03:37:25.206177727 +0200 +@@ -30,6 +30,7 @@ + #include <linux/idr.h> + #include <linux/spinlock.h> + #include <linux/percpu.h> ++#include <linux/locallock.h> + + #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) + #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) +@@ -45,6 +46,37 @@ + static DEFINE_PER_CPU(int, idr_preload_cnt); + static DEFINE_SPINLOCK(simple_ida_lock); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static DEFINE_LOCAL_IRQ_LOCK(idr_lock); ++ ++static inline void idr_preload_lock(void) ++{ ++ local_lock(idr_lock); ++} ++ ++static inline void idr_preload_unlock(void) ++{ ++ local_unlock(idr_lock); ++} ++ ++void idr_preload_end(void) ++{ ++ idr_preload_unlock(); ++} ++EXPORT_SYMBOL(idr_preload_end); ++#else ++static inline void idr_preload_lock(void) ++{ ++ preempt_disable(); ++} ++ ++static inline void idr_preload_unlock(void) ++{ ++ preempt_enable(); ++} ++#endif ++ ++ + /* the maximum ID which can be allocated given idr->layers */ + static int idr_max(int layers) + { +@@ -115,14 +147,14 @@ + * context. See idr_preload() for details. 
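For context, a typical caller of the preload API whose locking is changed here; this usage is unchanged by the patch (my_lock is a hypothetical caller-side lock). The preload section pins the per-CPU layer cache -- via preempt_disable() on mainline, via the new idr_lock on RT -- which is why idr_preload_end() has to become a real exported function on RT instead of the stock inline preempt_enable():

	static DEFINE_SPINLOCK(my_lock);	/* hypothetical caller lock */

	int assign_id(struct idr *idr, void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep, fills per-CPU cache */
		spin_lock(&my_lock);
		id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
		spin_unlock(&my_lock);
		idr_preload_end();		/* drops preemption / idr_lock */

		return id;			/* >= 0 on success */
	}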
+ */ + if (!in_interrupt()) { +- preempt_disable(); ++ idr_preload_lock(); + new = __this_cpu_read(idr_preload_head); + if (new) { + __this_cpu_write(idr_preload_head, new->ary[0]); + __this_cpu_dec(idr_preload_cnt); + new->ary[0] = NULL; + } +- preempt_enable(); ++ idr_preload_unlock(); + if (new) + return new; + } +@@ -366,7 +398,6 @@ + idr_mark_full(pa, id); + } + +- + /** + * idr_preload - preload for idr_alloc() + * @gfp_mask: allocation mask to use for preloading +@@ -401,7 +432,7 @@ + WARN_ON_ONCE(in_interrupt()); + might_sleep_if(gfpflags_allow_blocking(gfp_mask)); + +- preempt_disable(); ++ idr_preload_lock(); + + /* + * idr_alloc() is likely to succeed w/o full idr_layer buffer and +@@ -413,9 +444,9 @@ + while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { + struct idr_layer *new; + +- preempt_enable(); ++ idr_preload_unlock(); + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); +- preempt_disable(); ++ idr_preload_lock(); + if (!new) + break; + +diff -Nur linux-4.9.28.orig/lib/irq_poll.c linux-4.9.28/lib/irq_poll.c +--- linux-4.9.28.orig/lib/irq_poll.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/lib/irq_poll.c 2017-05-19 03:37:25.206177727 +0200 +@@ -36,6 +36,7 @@ + list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(irq_poll_sched); + +@@ -71,6 +72,7 @@ + local_irq_save(flags); + __irq_poll_complete(iop); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(irq_poll_complete); + +@@ -95,6 +97,7 @@ + } + + local_irq_enable(); ++ preempt_check_resched_rt(); + + /* Even though interrupts have been re-enabled, this + * access is safe because interrupts can only add new +@@ -132,6 +135,7 @@ + __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); + + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + /** +@@ -195,6 +199,7 @@ + this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + + return 0; + } +diff -Nur linux-4.9.28.orig/lib/Kconfig linux-4.9.28/lib/Kconfig +--- linux-4.9.28.orig/lib/Kconfig 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/lib/Kconfig 2017-05-19 03:37:25.206177727 +0200 +@@ -400,6 +400,7 @@ + + config CPUMASK_OFFSTACK + bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS ++ depends on !PREEMPT_RT_FULL + help + Use dynamic allocation for cpumask_var_t, instead of putting + them on the stack. 
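The preempt_check_resched_rt() calls sprinkled through the irq_poll hunks above exist because on RT the softirq runs in a thread: raising IRQ_POLL_SOFTIRQ from temporarily IRQs-off code may have queued a wakeup whose resched request nothing would otherwise act on promptly. The helper comes from a preempt.h hunk elsewhere in this patch; its shape is approximately:

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define preempt_check_resched_rt()	preempt_check_resched()
	#else
	# define preempt_check_resched_rt()	barrier()
	#endif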
This is a bit more expensive, but avoids +diff -Nur linux-4.9.28.orig/lib/locking-selftest.c linux-4.9.28/lib/locking-selftest.c +--- linux-4.9.28.orig/lib/locking-selftest.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/lib/locking-selftest.c 2017-05-19 03:37:25.206177727 +0200 +@@ -590,6 +590,8 @@ + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) + +@@ -605,9 +607,12 @@ + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Enabling hardirqs with a softirq-safe lock held: + */ +@@ -640,6 +645,8 @@ + #undef E1 + #undef E2 + ++#endif ++ + /* + * Enabling irqs with an irq-safe lock held: + */ +@@ -663,6 +670,8 @@ + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) + +@@ -678,6 +687,8 @@ + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + +@@ -709,6 +720,8 @@ + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) + +@@ -724,6 +737,8 @@ + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + #undef E3 +@@ -757,6 +772,8 @@ + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) + +@@ -772,10 +789,14 @@ + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + #undef E3 + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * read-lock / write-lock irq inversion. + * +@@ -838,6 +859,10 @@ + #undef E2 + #undef E3 + ++#endif ++ ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * read-lock / write-lock recursion that is actually safe. + */ +@@ -876,6 +901,8 @@ + #undef E2 + #undef E3 + ++#endif ++ + /* + * read-lock / write-lock recursion that is unsafe. 
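
The #ifndef CONFIG_PREEMPT_RT_FULL guards above drop every rwlock and softirq permutation from the irq-safety selftests, because on -rt both rwlock_t and spinlock_t are rt_mutex-based sleeping locks and may not be taken from hard interrupt context at all; only raw_spinlock_t keeps the mainline spinning semantics there. The resulting rule in miniature, with hypothetical demo_* names:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(hard_lock);  /* always spins: hardirq-safe on RT */
    static DEFINE_SPINLOCK(soft_lock);      /* rt_mutex on RT: may sleep */

    static irqreturn_t demo_irq(int irq, void *data)
    {
            raw_spin_lock(&hard_lock);      /* fine in a hard irq handler */
            raw_spin_unlock(&hard_lock);
            /* spin_lock(&soft_lock) here would be a bug on PREEMPT_RT_FULL:
             * the lock may sleep, and this context cannot */
            return IRQ_HANDLED;
    }

This is also why the RT branch of the test driver further down only exercises the irqsafe*_hard_spin cases.
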
+ */ +@@ -1858,6 +1885,7 @@ + + printk(" --------------------------------------------------------------------------\n"); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * irq-context testcases: + */ +@@ -1870,6 +1898,28 @@ + + DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); + // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); ++#else ++ /* On -rt, we only do hardirq context test for raw spinlock */ ++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); ++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); ++ ++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); ++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); ++ ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); ++ ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); ++#endif + + ww_tests(); + +diff -Nur linux-4.9.28.orig/lib/percpu_ida.c linux-4.9.28/lib/percpu_ida.c +--- linux-4.9.28.orig/lib/percpu_ida.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/lib/percpu_ida.c 2017-05-19 03:37:25.206177727 +0200 +@@ -26,6 +26,9 @@ + #include <linux/string.h> + #include <linux/spinlock.h> + #include <linux/percpu_ida.h> ++#include <linux/locallock.h> ++ ++static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock); + + struct percpu_ida_cpu { + /* +@@ -148,13 +151,13 @@ + unsigned long flags; + int tag; + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + tags = this_cpu_ptr(pool->tag_cpu); + + /* Fastpath */ + tag = alloc_local_tag(tags); + if (likely(tag >= 0)) { +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + return tag; + } + +@@ -173,6 +176,7 @@ + + if (!tags->nr_free) + alloc_global_tags(pool, tags); ++ + if (!tags->nr_free) + steal_tags(pool, tags); + +@@ -184,7 +188,7 @@ + } + + spin_unlock(&pool->lock); +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + + if (tag >= 0 || state == TASK_RUNNING) + break; +@@ -196,7 +200,7 @@ + + schedule(); + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + tags = this_cpu_ptr(pool->tag_cpu); + } + if (state != TASK_RUNNING) +@@ -221,7 +225,7 @@ + + BUG_ON(tag >= pool->nr_tags); + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + tags = this_cpu_ptr(pool->tag_cpu); + + spin_lock(&tags->lock); +@@ -253,7 +257,7 @@ + spin_unlock(&pool->lock); + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + } + EXPORT_SYMBOL_GPL(percpu_ida_free); + +@@ -345,7 +349,7 @@ + struct percpu_ida_cpu *remote; + unsigned cpu, i, err = 0; + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + for_each_possible_cpu(cpu) { + remote = per_cpu_ptr(pool->tag_cpu, cpu); + spin_lock(&remote->lock); +@@ -367,7 +371,7 @@ + } + 
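
percpu_ida's local_irq_save()/local_irq_restore() pairs all become local_lock_irqsave()/local_unlock_irqrestore() on the named irq_off_lock. On !RT that is the identical irq-off section; on RT no interrupts are disabled and the per-CPU lock alone provides exclusion, which is also what makes it legal to keep nesting the ordinary spinlocks tags->lock and pool->lock inside it. A condensed sketch of the irqsave variant (demo_* names are illustrative):

    #include <linux/locallock.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, demo_free_tag);
    static DEFINE_LOCAL_IRQ_LOCK(demo_irq_lock);

    static int demo_grab_tag(void)
    {
            unsigned long flags;
            int tag;

            /* drop-in for local_irq_save(): irqs really off on !RT,
             * only the per-CPU lock is taken on RT */
            local_lock_irqsave(demo_irq_lock, flags);
            tag = __this_cpu_read(demo_free_tag);
            __this_cpu_write(demo_free_tag, -1);
            local_unlock_irqrestore(demo_irq_lock, flags);
            return tag;
    }
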
spin_unlock(&pool->lock); + out: +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + return err; + } + EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); +diff -Nur linux-4.9.28.orig/lib/radix-tree.c linux-4.9.28/lib/radix-tree.c +--- linux-4.9.28.orig/lib/radix-tree.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/lib/radix-tree.c 2017-05-19 03:37:25.206177727 +0200 +@@ -36,7 +36,7 @@ + #include <linux/bitops.h> + #include <linux/rcupdate.h> + #include <linux/preempt.h> /* in_interrupt() */ +- ++#include <linux/locallock.h> + + /* Number of nodes in fully populated tree of given height */ + static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; +@@ -68,6 +68,7 @@ + struct radix_tree_node *nodes; + }; + static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; ++static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock); + + static inline void *node_to_entry(void *ptr) + { +@@ -290,13 +291,14 @@ + * succeed in getting a node here (and never reach + * kmem_cache_alloc) + */ +- rtp = this_cpu_ptr(&radix_tree_preloads); ++ rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads); + if (rtp->nr) { + ret = rtp->nodes; + rtp->nodes = ret->private_data; + ret->private_data = NULL; + rtp->nr--; + } ++ put_locked_var(radix_tree_preloads_lock, radix_tree_preloads); + /* + * Update the allocation stack trace as this is more useful + * for debugging. +@@ -357,14 +359,14 @@ + */ + gfp_mask &= ~__GFP_ACCOUNT; + +- preempt_disable(); ++ local_lock(radix_tree_preloads_lock); + rtp = this_cpu_ptr(&radix_tree_preloads); + while (rtp->nr < nr) { +- preempt_enable(); ++ local_unlock(radix_tree_preloads_lock); + node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); + if (node == NULL) + goto out; +- preempt_disable(); ++ local_lock(radix_tree_preloads_lock); + rtp = this_cpu_ptr(&radix_tree_preloads); + if (rtp->nr < nr) { + node->private_data = rtp->nodes; +@@ -406,7 +408,7 @@ + if (gfpflags_allow_blocking(gfp_mask)) + return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); + /* Preloading doesn't help anything with this gfp mask, skip it */ +- preempt_disable(); ++ local_lock(radix_tree_preloads_lock); + return 0; + } + EXPORT_SYMBOL(radix_tree_maybe_preload); +@@ -422,7 +424,7 @@ + + /* Preloading doesn't help anything with this gfp mask, skip it */ + if (!gfpflags_allow_blocking(gfp_mask)) { +- preempt_disable(); ++ local_lock(radix_tree_preloads_lock); + return 0; + } + +@@ -456,6 +458,12 @@ + return __radix_tree_preload(gfp_mask, nr_nodes); + } + ++void radix_tree_preload_end(void) ++{ ++ local_unlock(radix_tree_preloads_lock); ++} ++EXPORT_SYMBOL(radix_tree_preload_end); ++ + /* + * The maximum index which can be stored in a radix tree + */ +diff -Nur linux-4.9.28.orig/lib/scatterlist.c linux-4.9.28/lib/scatterlist.c +--- linux-4.9.28.orig/lib/scatterlist.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/lib/scatterlist.c 2017-05-19 03:37:25.206177727 +0200 +@@ -620,7 +620,7 @@ + flush_kernel_dcache_page(miter->page); + + if (miter->__flags & SG_MITER_ATOMIC) { +- WARN_ON_ONCE(preemptible()); ++ WARN_ON_ONCE(!pagefault_disabled()); + kunmap_atomic(miter->addr); + } else + kunmap(miter->page); +@@ -664,7 +664,7 @@ + if (!sg_miter_skip(&miter, skip)) + return false; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + while (sg_miter_next(&miter) && offset < buflen) { + unsigned int len; +@@ -681,7 +681,7 @@ + + sg_miter_stop(&miter); + +- local_irq_restore(flags); ++ 
local_irq_restore_nort(flags); + return offset; + } + EXPORT_SYMBOL(sg_copy_buffer); +diff -Nur linux-4.9.28.orig/lib/smp_processor_id.c linux-4.9.28/lib/smp_processor_id.c +--- linux-4.9.28.orig/lib/smp_processor_id.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/lib/smp_processor_id.c 2017-05-19 03:37:25.206177727 +0200 +@@ -39,8 +39,9 @@ + if (!printk_ratelimit()) + goto out_enable; + +- printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n", +- what1, what2, preempt_count() - 1, current->comm, current->pid); ++ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n", ++ what1, what2, preempt_count() - 1, __migrate_disabled(current), ++ current->comm, current->pid); + + print_symbol("caller is %s\n", (long)__builtin_return_address(0)); + dump_stack(); +diff -Nur linux-4.9.28.orig/MAINTAINERS linux-4.9.28/MAINTAINERS +--- linux-4.9.28.orig/MAINTAINERS 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/MAINTAINERS 2017-05-19 03:37:25.122174217 +0200 +@@ -5196,6 +5196,23 @@ + F: include/uapi/linux/fuse.h + F: Documentation/filesystems/fuse.txt + ++FUTEX SUBSYSTEM ++M: Thomas Gleixner <tglx@linutronix.de> ++M: Ingo Molnar <mingo@redhat.com> ++R: Peter Zijlstra <peterz@infradead.org> ++R: Darren Hart <dvhart@infradead.org> ++L: linux-kernel@vger.kernel.org ++T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core ++S: Maintained ++F: kernel/futex.c ++F: kernel/futex_compat.c ++F: include/asm-generic/futex.h ++F: include/linux/futex.h ++F: include/uapi/linux/futex.h ++F: tools/testing/selftests/futex/ ++F: tools/perf/bench/futex* ++F: Documentation/*futex* ++ + FUTURE DOMAIN TMC-16x0 SCSI DRIVER (16-bit) + M: Rik Faith <faith@cs.unc.edu> + L: linux-scsi@vger.kernel.org +diff -Nur linux-4.9.28.orig/mm/backing-dev.c linux-4.9.28/mm/backing-dev.c +--- linux-4.9.28.orig/mm/backing-dev.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/backing-dev.c 2017-05-19 03:37:25.206177727 +0200 +@@ -457,9 +457,9 @@ + { + unsigned long flags; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) { +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return; + } + +diff -Nur linux-4.9.28.orig/mm/compaction.c linux-4.9.28/mm/compaction.c +--- linux-4.9.28.orig/mm/compaction.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/compaction.c 2017-05-19 03:37:25.206177727 +0200 +@@ -1593,10 +1593,12 @@ + block_start_pfn(cc->migrate_pfn, cc->order); + + if (cc->last_migrated_pfn < current_block_start) { +- cpu = get_cpu(); ++ cpu = get_cpu_light(); ++ local_lock_irq(swapvec_lock); + lru_add_drain_cpu(cpu); ++ local_unlock_irq(swapvec_lock); + drain_local_pages(zone); +- put_cpu(); ++ put_cpu_light(); + /* No more flushing until we migrate again */ + cc->last_migrated_pfn = 0; + } +diff -Nur linux-4.9.28.orig/mm/filemap.c linux-4.9.28/mm/filemap.c +--- linux-4.9.28.orig/mm/filemap.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/filemap.c 2017-05-19 03:37:25.210177882 +0200 +@@ -159,9 +159,12 @@ + * node->private_list is protected by + * mapping->tree_lock. 
+ */ +- if (!list_empty(&node->private_list)) +- list_lru_del(&workingset_shadow_nodes, ++ if (!list_empty(&node->private_list)) { ++ local_lock(workingset_shadow_lock); ++ list_lru_del(&__workingset_shadow_nodes, + &node->private_list); ++ local_unlock(workingset_shadow_lock); ++ } + } + return 0; + } +@@ -217,8 +220,10 @@ + if (!dax_mapping(mapping) && !workingset_node_pages(node) && + list_empty(&node->private_list)) { + node->private_data = mapping; +- list_lru_add(&workingset_shadow_nodes, +- &node->private_list); ++ local_lock(workingset_shadow_lock); ++ list_lru_add(&__workingset_shadow_nodes, ++ &node->private_list); ++ local_unlock(workingset_shadow_lock); + } + } + +diff -Nur linux-4.9.28.orig/mm/highmem.c linux-4.9.28/mm/highmem.c +--- linux-4.9.28.orig/mm/highmem.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/highmem.c 2017-05-19 03:37:25.210177882 +0200 +@@ -29,10 +29,11 @@ + #include <linux/kgdb.h> + #include <asm/tlbflush.h> + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + DEFINE_PER_CPU(int, __kmap_atomic_idx); + #endif ++#endif + + /* + * Virtual_count is not a pure "count". +@@ -107,8 +108,9 @@ + unsigned long totalhigh_pages __read_mostly; + EXPORT_SYMBOL(totalhigh_pages); + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); ++#endif + + unsigned int nr_free_highpages (void) + { +diff -Nur linux-4.9.28.orig/mm/Kconfig linux-4.9.28/mm/Kconfig +--- linux-4.9.28.orig/mm/Kconfig 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/Kconfig 2017-05-19 03:37:25.206177727 +0200 +@@ -410,7 +410,7 @@ + + config TRANSPARENT_HUGEPAGE + bool "Transparent Hugepage Support" +- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE ++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL + select COMPACTION + select RADIX_TREE_MULTIORDER + help +diff -Nur linux-4.9.28.orig/mm/memcontrol.c linux-4.9.28/mm/memcontrol.c +--- linux-4.9.28.orig/mm/memcontrol.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/memcontrol.c 2017-05-19 03:37:25.210177882 +0200 +@@ -67,6 +67,7 @@ + #include <net/sock.h> + #include <net/ip.h> + #include "slab.h" ++#include <linux/locallock.h> + + #include <asm/uaccess.h> + +@@ -92,6 +93,8 @@ + #define do_swap_account 0 + #endif + ++static DEFINE_LOCAL_IRQ_LOCK(event_lock); ++ + /* Whether legacy memory+swap accounting is active */ + static bool do_memsw_account(void) + { +@@ -1692,6 +1695,7 @@ + #define FLUSHING_CACHED_CHARGE 0 + }; + static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); ++static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll); + static DEFINE_MUTEX(percpu_charge_mutex); + + /** +@@ -1714,7 +1718,7 @@ + if (nr_pages > CHARGE_BATCH) + return ret; + +- local_irq_save(flags); ++ local_lock_irqsave(memcg_stock_ll, flags); + + stock = this_cpu_ptr(&memcg_stock); + if (memcg == stock->cached && stock->nr_pages >= nr_pages) { +@@ -1722,7 +1726,7 @@ + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(memcg_stock_ll, flags); + + return ret; + } +@@ -1749,13 +1753,13 @@ + struct memcg_stock_pcp *stock; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(memcg_stock_ll, flags); + + stock = this_cpu_ptr(&memcg_stock); + drain_stock(stock); + clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); + +- local_irq_restore(flags); ++ local_unlock_irqrestore(memcg_stock_ll, flags); + } + + /* +@@ -1767,7 +1771,7 @@ + struct memcg_stock_pcp *stock; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(memcg_stock_ll, 
flags); + + stock = this_cpu_ptr(&memcg_stock); + if (stock->cached != memcg) { /* reset if necessary */ +@@ -1776,7 +1780,7 @@ + } + stock->nr_pages += nr_pages; + +- local_irq_restore(flags); ++ local_unlock_irqrestore(memcg_stock_ll, flags); + } + + /* +@@ -1792,7 +1796,7 @@ + return; + /* Notify other cpus that system-wide "drain" is running */ + get_online_cpus(); +- curcpu = get_cpu(); ++ curcpu = get_cpu_light(); + for_each_online_cpu(cpu) { + struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); + struct mem_cgroup *memcg; +@@ -1809,7 +1813,7 @@ + schedule_work_on(cpu, &stock->work); + } + } +- put_cpu(); ++ put_cpu_light(); + put_online_cpus(); + mutex_unlock(&percpu_charge_mutex); + } +@@ -4555,12 +4559,12 @@ + + ret = 0; + +- local_irq_disable(); ++ local_lock_irq(event_lock); + mem_cgroup_charge_statistics(to, page, compound, nr_pages); + memcg_check_events(to, page); + mem_cgroup_charge_statistics(from, page, compound, -nr_pages); + memcg_check_events(from, page); +- local_irq_enable(); ++ local_unlock_irq(event_lock); + out_unlock: + unlock_page(page); + out: +@@ -5435,10 +5439,10 @@ + + commit_charge(page, memcg, lrucare); + +- local_irq_disable(); ++ local_lock_irq(event_lock); + mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); + memcg_check_events(memcg, page); +- local_irq_enable(); ++ local_unlock_irq(event_lock); + + if (do_memsw_account() && PageSwapCache(page)) { + swp_entry_t entry = { .val = page_private(page) }; +@@ -5494,14 +5498,14 @@ + memcg_oom_recover(memcg); + } + +- local_irq_save(flags); ++ local_lock_irqsave(event_lock, flags); + __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); + __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); + __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); + __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); + __this_cpu_add(memcg->stat->nr_page_events, nr_pages); + memcg_check_events(memcg, dummy_page); +- local_irq_restore(flags); ++ local_unlock_irqrestore(event_lock, flags); + + if (!mem_cgroup_is_root(memcg)) + css_put_many(&memcg->css, nr_pages); +@@ -5656,10 +5660,10 @@ + + commit_charge(newpage, memcg, false); + +- local_irq_save(flags); ++ local_lock_irqsave(event_lock, flags); + mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); + memcg_check_events(memcg, newpage); +- local_irq_restore(flags); ++ local_unlock_irqrestore(event_lock, flags); + } + + DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); +@@ -5850,6 +5854,7 @@ + { + struct mem_cgroup *memcg, *swap_memcg; + unsigned short oldid; ++ unsigned long flags; + + VM_BUG_ON_PAGE(PageLRU(page), page); + VM_BUG_ON_PAGE(page_count(page), page); +@@ -5890,12 +5895,16 @@ + * important here to have the interrupts disabled because it is the + * only synchronisation we have for udpating the per-CPU variables. 
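
drain_all_stock() above also swaps get_cpu()/put_cpu() for the -rt get_cpu_light()/put_cpu_light() helpers. On !RT the *_light forms are exactly get_cpu()/put_cpu(); on RT they only disable migration, so the task keeps a stable CPU number without turning the whole drain loop into a non-preemptible section. A small sketch, assuming the helpers this series adds to <linux/smp.h>:

    #include <linux/printk.h>
    #include <linux/smp.h>

    static void demo_walk(void)
    {
            int cpu = get_cpu_light();   /* stable smp_processor_id(),
                                            still preemptible on RT */

            pr_info("pinned to CPU %d\n", cpu);
            put_cpu_light();
    }
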
+ */ ++ local_lock_irqsave(event_lock, flags); ++#ifndef CONFIG_PREEMPT_RT_BASE + VM_BUG_ON(!irqs_disabled()); ++#endif + mem_cgroup_charge_statistics(memcg, page, false, -1); + memcg_check_events(memcg, page); + + if (!mem_cgroup_is_root(memcg)) + css_put(&memcg->css); ++ local_unlock_irqrestore(event_lock, flags); + } + + /* +diff -Nur linux-4.9.28.orig/mm/mmu_context.c linux-4.9.28/mm/mmu_context.c +--- linux-4.9.28.orig/mm/mmu_context.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/mmu_context.c 2017-05-19 03:37:25.210177882 +0200 +@@ -23,6 +23,7 @@ + struct task_struct *tsk = current; + + task_lock(tsk); ++ preempt_disable_rt(); + active_mm = tsk->active_mm; + if (active_mm != mm) { + atomic_inc(&mm->mm_count); +@@ -30,6 +31,7 @@ + } + tsk->mm = mm; + switch_mm(active_mm, mm, tsk); ++ preempt_enable_rt(); + task_unlock(tsk); + #ifdef finish_arch_post_lock_switch + finish_arch_post_lock_switch(); +diff -Nur linux-4.9.28.orig/mm/page_alloc.c linux-4.9.28/mm/page_alloc.c +--- linux-4.9.28.orig/mm/page_alloc.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/page_alloc.c 2017-05-19 03:37:25.210177882 +0200 +@@ -61,6 +61,7 @@ + #include <linux/page_ext.h> + #include <linux/hugetlb.h> + #include <linux/sched/rt.h> ++#include <linux/locallock.h> + #include <linux/page_owner.h> + #include <linux/kthread.h> + #include <linux/memcontrol.h> +@@ -281,6 +282,18 @@ + EXPORT_SYMBOL(nr_online_nodes); + #endif + ++static DEFINE_LOCAL_IRQ_LOCK(pa_lock); ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define cpu_lock_irqsave(cpu, flags) \ ++ local_lock_irqsave_on(pa_lock, flags, cpu) ++# define cpu_unlock_irqrestore(cpu, flags) \ ++ local_unlock_irqrestore_on(pa_lock, flags, cpu) ++#else ++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) ++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) ++#endif ++ + int page_group_by_mobility_disabled __read_mostly; + + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT +@@ -1072,7 +1085,7 @@ + #endif /* CONFIG_DEBUG_VM */ + + /* +- * Frees a number of pages from the PCP lists ++ * Frees a number of pages which have been collected from the pcp lists. + * Assumes all pages on list are in same zone, and of same order. + * count is the number of pages to free. + * +@@ -1083,19 +1096,58 @@ + * pinned" detection logic. 
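
The rework below splits the old free_pcppages_bulk() in two: isolate_pcp_pages() detaches pages from the per-CPU lists inside the short pa_lock section, and free_pcppages_bulk() then takes zone->lock (now an irqsave lock, since callers no longer disable interrupts themselves) to hand the collected pages back to the buddy allocator. Keeping the two locks out of each other's shadow bounds both atomic sections on RT. The shape of the split, condensed, with illustration-only demo_* types standing in for per_cpu_pages and struct zone:

    #include <linux/list.h>
    #include <linux/locallock.h>
    #include <linux/spinlock.h>

    struct demo_pcp  { struct list_head pages; };
    struct demo_zone { spinlock_t lock; };

    static DEFINE_LOCAL_IRQ_LOCK(demo_pa_lock);

    static void demo_free_one(struct demo_zone *zone, struct list_head *lru)
    {
            /* stands in for __free_one_page() */
    }

    static void demo_drain(struct demo_zone *zone, struct demo_pcp *pcp)
    {
            unsigned long flags;
            LIST_HEAD(dst);

            /* step 1: only list surgery under the per-CPU lock */
            local_lock_irqsave(demo_pa_lock, flags);
            list_splice_init(&pcp->pages, &dst);
            local_unlock_irqrestore(demo_pa_lock, flags);

            /* step 2: the expensive part runs under zone->lock alone */
            spin_lock_irqsave(&zone->lock, flags);
            while (!list_empty(&dst)) {
                    struct list_head *lru = dst.next;

                    list_del(lru);
                    demo_free_one(zone, lru);
            }
            spin_unlock_irqrestore(&zone->lock, flags);
    }

The real isolate_pcp_pages() additionally round-robins across the migratetype lists; the sketch keeps only the two-phase locking structure.
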
+ */ + static void free_pcppages_bulk(struct zone *zone, int count, +- struct per_cpu_pages *pcp) ++ struct list_head *list) + { +- int migratetype = 0; +- int batch_free = 0; + unsigned long nr_scanned; + bool isolated_pageblocks; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&zone->lock, flags); + +- spin_lock(&zone->lock); + isolated_pageblocks = has_isolate_pageblock(zone); + nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); + if (nr_scanned) + __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); + ++ while (!list_empty(list)) { ++ struct page *page; ++ int mt; /* migratetype of the to-be-freed page */ ++ ++ page = list_first_entry(list, struct page, lru); ++ /* must delete as __free_one_page list manipulates */ ++ list_del(&page->lru); ++ ++ mt = get_pcppage_migratetype(page); ++ /* MIGRATE_ISOLATE page should not go to pcplists */ ++ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); ++ /* Pageblock could have been isolated meanwhile */ ++ if (unlikely(isolated_pageblocks)) ++ mt = get_pageblock_migratetype(page); ++ ++ if (bulkfree_pcp_prepare(page)) ++ continue; ++ ++ __free_one_page(page, page_to_pfn(page), zone, 0, mt); ++ trace_mm_page_pcpu_drain(page, 0, mt); ++ count--; ++ } ++ WARN_ON(count != 0); ++ spin_unlock_irqrestore(&zone->lock, flags); ++} ++ ++/* ++ * Moves a number of pages from the PCP lists to free list which ++ * is freed outside of the locked region. ++ * ++ * Assumes all pages on list are in same zone, and of same order. ++ * count is the number of pages to free. ++ */ ++static void isolate_pcp_pages(int count, struct per_cpu_pages *src, ++ struct list_head *dst) ++{ ++ int migratetype = 0; ++ int batch_free = 0; ++ + while (count) { + struct page *page; + struct list_head *list; +@@ -1111,7 +1163,7 @@ + batch_free++; + if (++migratetype == MIGRATE_PCPTYPES) + migratetype = 0; +- list = &pcp->lists[migratetype]; ++ list = &src->lists[migratetype]; + } while (list_empty(list)); + + /* This is the only non-empty list. Free them all. 
*/ +@@ -1119,27 +1171,12 @@ + batch_free = count; + + do { +- int mt; /* migratetype of the to-be-freed page */ +- + page = list_last_entry(list, struct page, lru); +- /* must delete as __free_one_page list manipulates */ + list_del(&page->lru); + +- mt = get_pcppage_migratetype(page); +- /* MIGRATE_ISOLATE page should not go to pcplists */ +- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); +- /* Pageblock could have been isolated meanwhile */ +- if (unlikely(isolated_pageblocks)) +- mt = get_pageblock_migratetype(page); +- +- if (bulkfree_pcp_prepare(page)) +- continue; +- +- __free_one_page(page, page_to_pfn(page), zone, 0, mt); +- trace_mm_page_pcpu_drain(page, 0, mt); ++ list_add(&page->lru, dst); + } while (--count && --batch_free && !list_empty(list)); + } +- spin_unlock(&zone->lock); + } + + static void free_one_page(struct zone *zone, +@@ -1148,7 +1185,9 @@ + int migratetype) + { + unsigned long nr_scanned; +- spin_lock(&zone->lock); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&zone->lock, flags); + nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); + if (nr_scanned) + __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); +@@ -1158,7 +1197,7 @@ + migratetype = get_pfnblock_migratetype(page, pfn); + } + __free_one_page(page, pfn, zone, order, migratetype); +- spin_unlock(&zone->lock); ++ spin_unlock_irqrestore(&zone->lock, flags); + } + + static void __meminit __init_single_page(struct page *page, unsigned long pfn, +@@ -1244,10 +1283,10 @@ + return; + + migratetype = get_pfnblock_migratetype(page, pfn); +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + __count_vm_events(PGFREE, 1 << order); + free_one_page(page_zone(page), page, pfn, order, migratetype); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + static void __init __free_pages_boot_core(struct page *page, unsigned int order) +@@ -2246,16 +2285,18 @@ + void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) + { + unsigned long flags; ++ LIST_HEAD(dst); + int to_drain, batch; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + batch = READ_ONCE(pcp->batch); + to_drain = min(pcp->count, batch); + if (to_drain > 0) { +- free_pcppages_bulk(zone, to_drain, pcp); ++ isolate_pcp_pages(to_drain, pcp, &dst); + pcp->count -= to_drain; + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); ++ free_pcppages_bulk(zone, to_drain, &dst); + } + #endif + +@@ -2271,16 +2312,21 @@ + unsigned long flags; + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; ++ LIST_HEAD(dst); ++ int count; + +- local_irq_save(flags); ++ cpu_lock_irqsave(cpu, flags); + pset = per_cpu_ptr(zone->pageset, cpu); + + pcp = &pset->pcp; +- if (pcp->count) { +- free_pcppages_bulk(zone, pcp->count, pcp); ++ count = pcp->count; ++ if (count) { ++ isolate_pcp_pages(count, pcp, &dst); + pcp->count = 0; + } +- local_irq_restore(flags); ++ cpu_unlock_irqrestore(cpu, flags); ++ if (count) ++ free_pcppages_bulk(zone, count, &dst); + } + + /* +@@ -2366,8 +2412,17 @@ + else + cpumask_clear_cpu(cpu, &cpus_with_pcps); + } ++#ifndef CONFIG_PREEMPT_RT_BASE + on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages, + zone, 1); ++#else ++ for_each_cpu(cpu, &cpus_with_pcps) { ++ if (zone) ++ drain_pages_zone(cpu, zone); ++ else ++ drain_pages(cpu); ++ } ++#endif + } + + #ifdef CONFIG_HIBERNATION +@@ -2427,7 +2482,7 @@ + + migratetype = get_pfnblock_migratetype(page, pfn); + set_pcppage_migratetype(page, migratetype); +- 
local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + __count_vm_event(PGFREE); + + /* +@@ -2453,12 +2508,17 @@ + pcp->count++; + if (pcp->count >= pcp->high) { + unsigned long batch = READ_ONCE(pcp->batch); +- free_pcppages_bulk(zone, batch, pcp); ++ LIST_HEAD(dst); ++ ++ isolate_pcp_pages(batch, pcp, &dst); + pcp->count -= batch; ++ local_unlock_irqrestore(pa_lock, flags); ++ free_pcppages_bulk(zone, batch, &dst); ++ return; + } + + out: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + /* +@@ -2600,7 +2660,7 @@ + struct per_cpu_pages *pcp; + struct list_head *list; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + do { + pcp = &this_cpu_ptr(zone->pageset)->pcp; + list = &pcp->lists[migratetype]; +@@ -2627,7 +2687,7 @@ + * allocate greater than order-1 page units with __GFP_NOFAIL. + */ + WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); +- spin_lock_irqsave(&zone->lock, flags); ++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags); + + do { + page = NULL; +@@ -2639,22 +2699,24 @@ + if (!page) + page = __rmqueue(zone, order, migratetype); + } while (page && check_new_pages(page, order)); +- spin_unlock(&zone->lock); +- if (!page) ++ if (!page) { ++ spin_unlock(&zone->lock); + goto failed; ++ } + __mod_zone_freepage_state(zone, -(1 << order), + get_pcppage_migratetype(page)); ++ spin_unlock(&zone->lock); + } + + __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); + zone_statistics(preferred_zone, zone, gfp_flags); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + + VM_BUG_ON_PAGE(bad_range(zone, page), page); + return page; + + failed: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + return NULL; + } + +@@ -6531,7 +6593,9 @@ + int cpu = (unsigned long)hcpu; + + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { ++ local_lock_irq_on(swapvec_lock, cpu); + lru_add_drain_cpu(cpu); ++ local_unlock_irq_on(swapvec_lock, cpu); + drain_pages(cpu); + + /* +@@ -6557,6 +6621,7 @@ + void __init page_alloc_init(void) + { + hotcpu_notifier(page_alloc_cpu_notify, 0); ++ local_irq_lock_init(pa_lock); + } + + /* +@@ -7385,7 +7450,7 @@ + struct per_cpu_pageset *pset; + + /* avoid races with drain_pages() */ +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + if (zone->pageset != &boot_pageset) { + for_each_online_cpu(cpu) { + pset = per_cpu_ptr(zone->pageset, cpu); +@@ -7394,7 +7459,7 @@ + free_percpu(zone->pageset); + zone->pageset = &boot_pageset; + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + #ifdef CONFIG_MEMORY_HOTREMOVE +diff -Nur linux-4.9.28.orig/mm/percpu.c linux-4.9.28/mm/percpu.c +--- linux-4.9.28.orig/mm/percpu.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/percpu.c 2017-05-19 03:37:25.210177882 +0200 +@@ -1283,18 +1283,7 @@ + } + EXPORT_SYMBOL_GPL(free_percpu); + +-/** +- * is_kernel_percpu_address - test whether address is from static percpu area +- * @addr: address to test +- * +- * Test whether @addr belongs to in-kernel static percpu area. Module +- * static percpu areas are not considered. For those, use +- * is_module_percpu_address(). +- * +- * RETURNS: +- * %true if @addr is from in-kernel static percpu area, %false otherwise. 
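
The percpu.c hunk below splits is_kernel_percpu_address() so that a new __is_kernel_percpu_address() can also report a canonical address: the offset of @addr within its CPU's chunk, rebased onto the boot CPU's copy. Every CPU's alias of the same static per-CPU variable then maps to one address, which lets -rt lockdep give locks embedded in per-CPU data a single class. A hypothetical caller (the function name is made up):

    #include <linux/percpu.h>

    static void *demo_canonicalize(void *addr)
    {
            unsigned long can;

            if (__is_kernel_percpu_address((unsigned long)addr, &can))
                    return (void *)can;   /* boot-CPU copy: identical for
                                             every CPU's alias of @addr */
            return addr;                  /* not static per-CPU data */
    }
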
+- */ +-bool is_kernel_percpu_address(unsigned long addr) ++bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) + { + #ifdef CONFIG_SMP + const size_t static_size = __per_cpu_end - __per_cpu_start; +@@ -1303,16 +1292,39 @@ + + for_each_possible_cpu(cpu) { + void *start = per_cpu_ptr(base, cpu); ++ void *va = (void *)addr; + +- if ((void *)addr >= start && (void *)addr < start + static_size) ++ if (va >= start && va < start + static_size) { ++ if (can_addr) { ++ *can_addr = (unsigned long) (va - start); ++ *can_addr += (unsigned long) ++ per_cpu_ptr(base, get_boot_cpu_id()); ++ } + return true; +- } ++ } ++ } + #endif + /* on UP, can't distinguish from other static vars, always false */ + return false; + } + + /** ++ * is_kernel_percpu_address - test whether address is from static percpu area ++ * @addr: address to test ++ * ++ * Test whether @addr belongs to in-kernel static percpu area. Module ++ * static percpu areas are not considered. For those, use ++ * is_module_percpu_address(). ++ * ++ * RETURNS: ++ * %true if @addr is from in-kernel static percpu area, %false otherwise. ++ */ ++bool is_kernel_percpu_address(unsigned long addr) ++{ ++ return __is_kernel_percpu_address(addr, NULL); ++} ++ ++/** + * per_cpu_ptr_to_phys - convert translated percpu address to physical address + * @addr: the address to be converted to physical address + * +diff -Nur linux-4.9.28.orig/mm/slab.h linux-4.9.28/mm/slab.h +--- linux-4.9.28.orig/mm/slab.h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/slab.h 2017-05-19 03:37:25.210177882 +0200 +@@ -426,7 +426,11 @@ + * The slab lists for all objects. + */ + struct kmem_cache_node { ++#ifdef CONFIG_SLUB ++ raw_spinlock_t list_lock; ++#else + spinlock_t list_lock; ++#endif + + #ifdef CONFIG_SLAB + struct list_head slabs_partial; /* partial list first, better asm code */ +diff -Nur linux-4.9.28.orig/mm/slub.c linux-4.9.28/mm/slub.c +--- linux-4.9.28.orig/mm/slub.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/slub.c 2017-05-19 03:37:25.210177882 +0200 +@@ -1141,7 +1141,7 @@ + unsigned long uninitialized_var(flags); + int ret = 0; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + slab_lock(page); + + if (s->flags & SLAB_CONSISTENCY_CHECKS) { +@@ -1176,7 +1176,7 @@ + bulk_cnt, cnt); + + slab_unlock(page); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + if (!ret) + slab_fix(s, "Object at 0x%p not freed", object); + return ret; +@@ -1304,6 +1304,12 @@ + + #endif /* CONFIG_SLUB_DEBUG */ + ++struct slub_free_list { ++ raw_spinlock_t lock; ++ struct list_head list; ++}; ++static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); ++ + /* + * Hooks for other subsystems that check memory allocations. In a typical + * production configuration these hooks all should produce no code at all. 
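
struct slub_free_list, declared just above, is the deferred-free queue the rest of the SLUB changes feed: when a slab page must be discarded while interrupts are off, __free_slab() cannot run on RT (freeing pages takes sleeping locks), so the page is parked on the per-CPU list under a raw spinlock and later drained through free_delayed() from a preemptible context. The underlying pattern, reduced to a sketch (demo_* names invented; the per-CPU locks would be initialized at boot, as kmem_cache_init() does further down):

    #include <linux/irqflags.h>
    #include <linux/list.h>
    #include <linux/mm.h>
    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    struct demo_deferred {
            raw_spinlock_t   lock;    /* raw: usable with irqs off on RT */
            struct list_head list;
    };
    static DEFINE_PER_CPU(struct demo_deferred, demo_deferred_free);

    static void demo_discard(struct page *page)
    {
            if (irqs_disabled()) {    /* cannot free here on RT: park it */
                    struct demo_deferred *f = this_cpu_ptr(&demo_deferred_free);

                    raw_spin_lock(&f->lock);
                    list_add(&page->lru, &f->list);
                    raw_spin_unlock(&f->lock);
            } else {
                    __free_pages(page, 0);
            }
    }
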
+@@ -1527,10 +1533,17 @@ + void *start, *p; + int idx, order; + bool shuffle; ++ bool enableirqs = false; + + flags &= gfp_allowed_mask; + + if (gfpflags_allow_blocking(flags)) ++ enableirqs = true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (system_state == SYSTEM_RUNNING) ++ enableirqs = true; ++#endif ++ if (enableirqs) + local_irq_enable(); + + flags |= s->allocflags; +@@ -1605,7 +1618,7 @@ + page->frozen = 1; + + out: +- if (gfpflags_allow_blocking(flags)) ++ if (enableirqs) + local_irq_disable(); + if (!page) + return NULL; +@@ -1664,6 +1677,16 @@ + __free_pages(page, order); + } + ++static void free_delayed(struct list_head *h) ++{ ++ while(!list_empty(h)) { ++ struct page *page = list_first_entry(h, struct page, lru); ++ ++ list_del(&page->lru); ++ __free_slab(page->slab_cache, page); ++ } ++} ++ + #define need_reserve_slab_rcu \ + (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) + +@@ -1695,6 +1718,12 @@ + } + + call_rcu(head, rcu_free_slab); ++ } else if (irqs_disabled()) { ++ struct slub_free_list *f = this_cpu_ptr(&slub_free_list); ++ ++ raw_spin_lock(&f->lock); ++ list_add(&page->lru, &f->list); ++ raw_spin_unlock(&f->lock); + } else + __free_slab(s, page); + } +@@ -1802,7 +1831,7 @@ + if (!n || !n->nr_partial) + return NULL; + +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + list_for_each_entry_safe(page, page2, &n->partial, lru) { + void *t; + +@@ -1827,7 +1856,7 @@ + break; + + } +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + return object; + } + +@@ -2073,7 +2102,7 @@ + * that acquire_slab() will see a slab page that + * is frozen + */ +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + } else { + m = M_FULL; +@@ -2084,7 +2113,7 @@ + * slabs from diagnostic functions will not see + * any frozen slabs. + */ +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + } + +@@ -2119,7 +2148,7 @@ + goto redo; + + if (lock) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + if (m == M_FREE) { + stat(s, DEACTIVATE_EMPTY); +@@ -2151,10 +2180,10 @@ + n2 = get_node(s, page_to_nid(page)); + if (n != n2) { + if (n) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + n = n2; +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + + do { +@@ -2183,7 +2212,7 @@ + } + + if (n) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + while (discard_page) { + page = discard_page; +@@ -2222,14 +2251,21 @@ + pobjects = oldpage->pobjects; + pages = oldpage->pages; + if (drain && pobjects > s->cpu_partial) { ++ struct slub_free_list *f; + unsigned long flags; ++ LIST_HEAD(tofree); + /* + * partial array is full. Move the existing + * set to the per node partial list. 
+ */ + local_irq_save(flags); + unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); ++ f = this_cpu_ptr(&slub_free_list); ++ raw_spin_lock(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock(&f->lock); + local_irq_restore(flags); ++ free_delayed(&tofree); + oldpage = NULL; + pobjects = 0; + pages = 0; +@@ -2301,7 +2337,22 @@ + + static void flush_all(struct kmem_cache *s) + { ++ LIST_HEAD(tofree); ++ int cpu; ++ + on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); ++ for_each_online_cpu(cpu) { ++ struct slub_free_list *f; ++ ++ if (!has_cpu_slab(cpu, s)) ++ continue; ++ ++ f = &per_cpu(slub_free_list, cpu); ++ raw_spin_lock_irq(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock_irq(&f->lock); ++ free_delayed(&tofree); ++ } + } + + /* +@@ -2356,10 +2407,10 @@ + unsigned long x = 0; + struct page *page; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + x += get_count(page); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return x; + } + #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ +@@ -2497,8 +2548,10 @@ + * already disabled (which is the case for bulk allocation). + */ + static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, +- unsigned long addr, struct kmem_cache_cpu *c) ++ unsigned long addr, struct kmem_cache_cpu *c, ++ struct list_head *to_free) + { ++ struct slub_free_list *f; + void *freelist; + struct page *page; + +@@ -2558,6 +2611,13 @@ + VM_BUG_ON(!c->page->frozen); + c->freelist = get_freepointer(s, freelist); + c->tid = next_tid(c->tid); ++ ++out: ++ f = this_cpu_ptr(&slub_free_list); ++ raw_spin_lock(&f->lock); ++ list_splice_init(&f->list, to_free); ++ raw_spin_unlock(&f->lock); ++ + return freelist; + + new_slab: +@@ -2589,7 +2649,7 @@ + deactivate_slab(s, page, get_freepointer(s, freelist)); + c->page = NULL; + c->freelist = NULL; +- return freelist; ++ goto out; + } + + /* +@@ -2601,6 +2661,7 @@ + { + void *p; + unsigned long flags; ++ LIST_HEAD(tofree); + + local_irq_save(flags); + #ifdef CONFIG_PREEMPT +@@ -2612,8 +2673,9 @@ + c = this_cpu_ptr(s->cpu_slab); + #endif + +- p = ___slab_alloc(s, gfpflags, node, addr, c); ++ p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree); + local_irq_restore(flags); ++ free_delayed(&tofree); + return p; + } + +@@ -2799,7 +2861,7 @@ + + do { + if (unlikely(n)) { +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + n = NULL; + } + prior = page->freelist; +@@ -2831,7 +2893,7 @@ + * Otherwise the list_lock will synchronize with + * other processors updating the list of slabs. 
+ */ +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + } + } +@@ -2873,7 +2935,7 @@ + add_partial(n, page, DEACTIVATE_TO_TAIL); + stat(s, FREE_ADD_PARTIAL); + } +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return; + + slab_empty: +@@ -2888,7 +2950,7 @@ + remove_full(s, n, page); + } + +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + stat(s, FREE_SLAB); + discard_slab(s, page); + } +@@ -3093,6 +3155,7 @@ + void **p) + { + struct kmem_cache_cpu *c; ++ LIST_HEAD(to_free); + int i; + + /* memcg and kmem_cache debug support */ +@@ -3116,7 +3179,7 @@ + * of re-populating per CPU c->freelist + */ + p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, +- _RET_IP_, c); ++ _RET_IP_, c, &to_free); + if (unlikely(!p[i])) + goto error; + +@@ -3128,6 +3191,7 @@ + } + c->tid = next_tid(c->tid); + local_irq_enable(); ++ free_delayed(&to_free); + + /* Clear memory outside IRQ disabled fastpath loop */ + if (unlikely(flags & __GFP_ZERO)) { +@@ -3275,7 +3339,7 @@ + init_kmem_cache_node(struct kmem_cache_node *n) + { + n->nr_partial = 0; +- spin_lock_init(&n->list_lock); ++ raw_spin_lock_init(&n->list_lock); + INIT_LIST_HEAD(&n->partial); + #ifdef CONFIG_SLUB_DEBUG + atomic_long_set(&n->nr_slabs, 0); +@@ -3619,6 +3683,10 @@ + const char *text) + { + #ifdef CONFIG_SLUB_DEBUG ++#ifdef CONFIG_PREEMPT_RT_BASE ++ /* XXX move out of irq-off section */ ++ slab_err(s, page, text, s->name); ++#else + void *addr = page_address(page); + void *p; + unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * +@@ -3639,6 +3707,7 @@ + slab_unlock(page); + kfree(map); + #endif ++#endif + } + + /* +@@ -3652,7 +3721,7 @@ + struct page *page, *h; + + BUG_ON(irqs_disabled()); +- spin_lock_irq(&n->list_lock); ++ raw_spin_lock_irq(&n->list_lock); + list_for_each_entry_safe(page, h, &n->partial, lru) { + if (!page->inuse) { + remove_partial(n, page); +@@ -3662,7 +3731,7 @@ + "Objects remaining in %s on __kmem_cache_shutdown()"); + } + } +- spin_unlock_irq(&n->list_lock); ++ raw_spin_unlock_irq(&n->list_lock); + + list_for_each_entry_safe(page, h, &discard, lru) + discard_slab(s, page); +@@ -3905,7 +3974,7 @@ + for (i = 0; i < SHRINK_PROMOTE_MAX; i++) + INIT_LIST_HEAD(promote + i); + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + /* + * Build lists of slabs to discard or promote. 
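
Every kmem_cache_node list_lock site in this file moves from spin_lock* to raw_spin_lock*, because these sections nest inside contexts that genuinely run with interrupts or preemption off even on RT, where a sleeping spinlock_t would be illegal. The cost is that raw sections must stay short and bounded, which is why this shrink path builds the discard/promote lists under the lock and frees the slabs only after dropping it. The nesting rule in miniature (demo_* names illustrative):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_outer);       /* sleeps on RT */
    static DEFINE_RAW_SPINLOCK(demo_inner);   /* always spins */

    static void demo_nesting(void)
    {
            spin_lock(&demo_outer);
            raw_spin_lock(&demo_inner);   /* ok: raw inside sleeping */
            raw_spin_unlock(&demo_inner);
            spin_unlock(&demo_outer);
            /* the reverse, spin_lock() inside a raw_spin_lock() section,
             * would sleep in atomic context on PREEMPT_RT_FULL */
    }
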
+@@ -3936,7 +4005,7 @@ + for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) + list_splice(promote + i, &n->partial); + +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + + /* Release empty slabs */ + list_for_each_entry_safe(page, t, &discard, lru) +@@ -4112,6 +4181,12 @@ + { + static __initdata struct kmem_cache boot_kmem_cache, + boot_kmem_cache_node; ++ int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); ++ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); ++ } + + if (debug_guardpage_minorder()) + slub_max_order = 0; +@@ -4320,7 +4395,7 @@ + struct page *page; + unsigned long flags; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + list_for_each_entry(page, &n->partial, lru) { + validate_slab_slab(s, page, map); +@@ -4342,7 +4417,7 @@ + s->name, count, atomic_long_read(&n->nr_slabs)); + + out: +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return count; + } + +@@ -4530,12 +4605,12 @@ + if (!atomic_long_read(&n->nr_slabs)) + continue; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + process_slab(&t, s, page, alloc, map); + list_for_each_entry(page, &n->full, lru) + process_slab(&t, s, page, alloc, map); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + } + + for (i = 0; i < t.count; i++) { +diff -Nur linux-4.9.28.orig/mm/swap.c linux-4.9.28/mm/swap.c +--- linux-4.9.28.orig/mm/swap.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/swap.c 2017-05-19 03:37:25.210177882 +0200 +@@ -32,6 +32,7 @@ + #include <linux/memcontrol.h> + #include <linux/gfp.h> + #include <linux/uio.h> ++#include <linux/locallock.h> + #include <linux/hugetlb.h> + #include <linux/page_idle.h> + +@@ -50,6 +51,8 @@ + #ifdef CONFIG_SMP + static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); + #endif ++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); ++DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); + + /* + * This path almost never happens for VM activity - pages are normally +@@ -240,11 +243,11 @@ + unsigned long flags; + + get_page(page); +- local_irq_save(flags); ++ local_lock_irqsave(rotate_lock, flags); + pvec = this_cpu_ptr(&lru_rotate_pvecs); + if (!pagevec_add(pvec, page) || PageCompound(page)) + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore(rotate_lock, flags); + } + } + +@@ -294,12 +297,13 @@ + { + page = compound_head(page); + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { +- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ activate_page_pvecs); + + get_page(page); + if (!pagevec_add(pvec, page) || PageCompound(page)) + pagevec_lru_move_fn(pvec, __activate_page, NULL); +- put_cpu_var(activate_page_pvecs); ++ put_locked_var(swapvec_lock, activate_page_pvecs); + } + } + +@@ -326,7 +330,7 @@ + + static void __lru_cache_activate_page(struct page *page) + { +- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); + int i; + + /* +@@ -348,7 +352,7 @@ + } + } + +- put_cpu_var(lru_add_pvec); ++ put_locked_var(swapvec_lock, lru_add_pvec); + } + + /* +@@ -390,12 +394,12 @@ + + static void __lru_cache_add(struct page *page) + { +- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); ++ 
struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); + + get_page(page); + if (!pagevec_add(pvec, page) || PageCompound(page)) + __pagevec_lru_add(pvec); +- put_cpu_var(lru_add_pvec); ++ put_locked_var(swapvec_lock, lru_add_pvec); + } + + /** +@@ -593,9 +597,15 @@ + unsigned long flags; + + /* No harm done if a racing interrupt already did this */ +- local_irq_save(flags); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ local_lock_irqsave_on(rotate_lock, flags, cpu); + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore_on(rotate_lock, flags, cpu); ++#else ++ local_lock_irqsave(rotate_lock, flags); ++ pagevec_move_tail(pvec); ++ local_unlock_irqrestore(rotate_lock, flags); ++#endif + } + + pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); +@@ -627,11 +637,12 @@ + return; + + if (likely(get_page_unless_zero(page))) { +- struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ lru_deactivate_file_pvecs); + + if (!pagevec_add(pvec, page) || PageCompound(page)) + pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); +- put_cpu_var(lru_deactivate_file_pvecs); ++ put_locked_var(swapvec_lock, lru_deactivate_file_pvecs); + } + } + +@@ -646,27 +657,31 @@ + void deactivate_page(struct page *page) + { + if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { +- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ lru_deactivate_pvecs); + + get_page(page); + if (!pagevec_add(pvec, page) || PageCompound(page)) + pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); +- put_cpu_var(lru_deactivate_pvecs); ++ put_locked_var(swapvec_lock, lru_deactivate_pvecs); + } + } + + void lru_add_drain(void) + { +- lru_add_drain_cpu(get_cpu()); +- put_cpu(); ++ lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); ++ local_unlock_cpu(swapvec_lock); + } + +-static void lru_add_drain_per_cpu(struct work_struct *dummy) ++#ifdef CONFIG_PREEMPT_RT_BASE ++static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work) + { +- lru_add_drain(); ++ local_lock_on(swapvec_lock, cpu); ++ lru_add_drain_cpu(cpu); ++ local_unlock_on(swapvec_lock, cpu); + } + +-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); ++#else + + /* + * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM +@@ -686,6 +701,22 @@ + } + early_initcall(lru_init); + ++static void lru_add_drain_per_cpu(struct work_struct *dummy) ++{ ++ lru_add_drain(); ++} ++ ++static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); ++static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work) ++{ ++ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); ++ ++ INIT_WORK(work, lru_add_drain_per_cpu); ++ queue_work_on(cpu, lru_add_drain_wq, work); ++ cpumask_set_cpu(cpu, has_work); ++} ++#endif ++ + void lru_add_drain_all(void) + { + static DEFINE_MUTEX(lock); +@@ -697,21 +728,18 @@ + cpumask_clear(&has_work); + + for_each_online_cpu(cpu) { +- struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); +- + if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) || + pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) || + pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) || + pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || +- need_activate_page_drain(cpu)) { +- INIT_WORK(work, lru_add_drain_per_cpu); +- queue_work_on(cpu, lru_add_drain_wq, work); +- cpumask_set_cpu(cpu, &has_work); +- } ++ need_activate_page_drain(cpu)) ++ 
remote_lru_add_drain(cpu, &has_work); + } + ++#ifndef CONFIG_PREEMPT_RT_BASE + for_each_cpu(cpu, &has_work) + flush_work(&per_cpu(lru_add_drain_work, cpu)); ++#endif + + put_online_cpus(); + mutex_unlock(&lock); +diff -Nur linux-4.9.28.orig/mm/truncate.c linux-4.9.28/mm/truncate.c +--- linux-4.9.28.orig/mm/truncate.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/truncate.c 2017-05-19 03:37:25.214178037 +0200 +@@ -62,9 +62,12 @@ + * protected by mapping->tree_lock. + */ + if (!workingset_node_shadows(node) && +- !list_empty(&node->private_list)) +- list_lru_del(&workingset_shadow_nodes, ++ !list_empty(&node->private_list)) { ++ local_lock(workingset_shadow_lock); ++ list_lru_del(&__workingset_shadow_nodes, + &node->private_list); ++ local_unlock(workingset_shadow_lock); ++ } + __radix_tree_delete_node(&mapping->page_tree, node); + unlock: + spin_unlock_irq(&mapping->tree_lock); +diff -Nur linux-4.9.28.orig/mm/vmalloc.c linux-4.9.28/mm/vmalloc.c +--- linux-4.9.28.orig/mm/vmalloc.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/vmalloc.c 2017-05-19 03:37:25.214178037 +0200 +@@ -845,7 +845,7 @@ + struct vmap_block *vb; + struct vmap_area *va; + unsigned long vb_idx; +- int node, err; ++ int node, err, cpu; + void *vaddr; + + node = numa_node_id(); +@@ -888,11 +888,12 @@ + BUG_ON(err); + radix_tree_preload_end(); + +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = this_cpu_ptr(&vmap_block_queue); + spin_lock(&vbq->lock); + list_add_tail_rcu(&vb->free_list, &vbq->free); + spin_unlock(&vbq->lock); +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + + return vaddr; + } +@@ -961,6 +962,7 @@ + struct vmap_block *vb; + void *vaddr = NULL; + unsigned int order; ++ int cpu; + + BUG_ON(offset_in_page(size)); + BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); +@@ -975,7 +977,8 @@ + order = get_order(size); + + rcu_read_lock(); +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = this_cpu_ptr(&vmap_block_queue); + list_for_each_entry_rcu(vb, &vbq->free, free_list) { + unsigned long pages_off; + +@@ -998,7 +1001,7 @@ + break; + } + +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + rcu_read_unlock(); + + /* Allocate new block if nothing was found */ +diff -Nur linux-4.9.28.orig/mm/vmstat.c linux-4.9.28/mm/vmstat.c +--- linux-4.9.28.orig/mm/vmstat.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/vmstat.c 2017-05-19 03:37:25.214178037 +0200 +@@ -245,6 +245,7 @@ + long x; + long t; + ++ preempt_disable_rt(); + x = delta + __this_cpu_read(*p); + + t = __this_cpu_read(pcp->stat_threshold); +@@ -254,6 +255,7 @@ + x = 0; + } + __this_cpu_write(*p, x); ++ preempt_enable_rt(); + } + EXPORT_SYMBOL(__mod_zone_page_state); + +@@ -265,6 +267,7 @@ + long x; + long t; + ++ preempt_disable_rt(); + x = delta + __this_cpu_read(*p); + + t = __this_cpu_read(pcp->stat_threshold); +@@ -274,6 +277,7 @@ + x = 0; + } + __this_cpu_write(*p, x); ++ preempt_enable_rt(); + } + EXPORT_SYMBOL(__mod_node_page_state); + +@@ -306,6 +310,7 @@ + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_inc_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v > t)) { +@@ -314,6 +319,7 @@ + zone_page_state_add(v + overstep, zone, item); + __this_cpu_write(*p, -overstep); + } ++ preempt_enable_rt(); + } + + void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) +@@ -322,6 +328,7 @@ + s8 __percpu *p = pcp->vm_node_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = 
__this_cpu_inc_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v > t)) { +@@ -330,6 +337,7 @@ + node_page_state_add(v + overstep, pgdat, item); + __this_cpu_write(*p, -overstep); + } ++ preempt_enable_rt(); + } + + void __inc_zone_page_state(struct page *page, enum zone_stat_item item) +@@ -350,6 +358,7 @@ + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_dec_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v < - t)) { +@@ -358,6 +367,7 @@ + zone_page_state_add(v - overstep, zone, item); + __this_cpu_write(*p, overstep); + } ++ preempt_enable_rt(); + } + + void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) +@@ -366,6 +376,7 @@ + s8 __percpu *p = pcp->vm_node_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_dec_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v < - t)) { +@@ -374,6 +385,7 @@ + node_page_state_add(v - overstep, pgdat, item); + __this_cpu_write(*p, overstep); + } ++ preempt_enable_rt(); + } + + void __dec_zone_page_state(struct page *page, enum zone_stat_item item) +diff -Nur linux-4.9.28.orig/mm/workingset.c linux-4.9.28/mm/workingset.c +--- linux-4.9.28.orig/mm/workingset.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/workingset.c 2017-05-19 03:37:25.214178037 +0200 +@@ -334,7 +334,8 @@ + * point where they would still be useful. + */ + +-struct list_lru workingset_shadow_nodes; ++struct list_lru __workingset_shadow_nodes; ++DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock); + + static unsigned long count_shadow_nodes(struct shrinker *shrinker, + struct shrink_control *sc) +@@ -344,9 +345,9 @@ + unsigned long pages; + + /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ +- local_irq_disable(); +- shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc); +- local_irq_enable(); ++ local_lock_irq(workingset_shadow_lock); ++ shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc); ++ local_unlock_irq(workingset_shadow_lock); + + if (sc->memcg) { + pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid, +@@ -438,9 +439,9 @@ + spin_unlock(&mapping->tree_lock); + ret = LRU_REMOVED_RETRY; + out: +- local_irq_enable(); ++ local_unlock_irq(workingset_shadow_lock); + cond_resched(); +- local_irq_disable(); ++ local_lock_irq(workingset_shadow_lock); + spin_lock(lru_lock); + return ret; + } +@@ -451,10 +452,10 @@ + unsigned long ret; + + /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ +- local_irq_disable(); +- ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc, ++ local_lock_irq(workingset_shadow_lock); ++ ret = list_lru_shrink_walk(&__workingset_shadow_nodes, sc, + shadow_lru_isolate, NULL); +- local_irq_enable(); ++ local_unlock_irq(workingset_shadow_lock); + return ret; + } + +@@ -492,7 +493,7 @@ + pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", + timestamp_bits, max_order, bucket_order); + +- ret = __list_lru_init(&workingset_shadow_nodes, true, &shadow_nodes_key); ++ ret = __list_lru_init(&__workingset_shadow_nodes, true, &shadow_nodes_key); + if (ret) + goto err; + ret = register_shrinker(&workingset_shadow_shrinker); +@@ -500,7 +501,7 @@ + goto err_list_lru; + return 0; + err_list_lru: +- list_lru_destroy(&workingset_shadow_nodes); ++ list_lru_destroy(&__workingset_shadow_nodes); + err: + return ret; + } +diff -Nur linux-4.9.28.orig/mm/zsmalloc.c linux-4.9.28/mm/zsmalloc.c +--- linux-4.9.28.orig/mm/zsmalloc.c 2017-05-14 
14:00:37.000000000 +0200 ++++ linux-4.9.28/mm/zsmalloc.c 2017-05-19 03:37:25.214178037 +0200 +@@ -53,6 +53,7 @@ + #include <linux/mount.h> + #include <linux/migrate.h> + #include <linux/pagemap.h> ++#include <linux/locallock.h> + + #define ZSPAGE_MAGIC 0x58 + +@@ -70,9 +71,22 @@ + */ + #define ZS_MAX_ZSPAGE_ORDER 2 + #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) +- + #define ZS_HANDLE_SIZE (sizeof(unsigned long)) + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++struct zsmalloc_handle { ++ unsigned long addr; ++ struct mutex lock; ++}; ++ ++#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle)) ++ ++#else ++ ++#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long)) ++#endif ++ + /* + * Object location (<PFN>, <obj_idx>) is encoded as + * as single (unsigned long) handle value. +@@ -327,7 +341,7 @@ + + static int create_cache(struct zs_pool *pool) + { +- pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, ++ pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE, + 0, 0, NULL); + if (!pool->handle_cachep) + return 1; +@@ -351,10 +365,27 @@ + + static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) + { +- return (unsigned long)kmem_cache_alloc(pool->handle_cachep, +- gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); ++ void *p; ++ ++ p = kmem_cache_alloc(pool->handle_cachep, ++ gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (p) { ++ struct zsmalloc_handle *zh = p; ++ ++ mutex_init(&zh->lock); ++ } ++#endif ++ return (unsigned long)p; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle) ++{ ++ return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1)); ++} ++#endif ++ + static void cache_free_handle(struct zs_pool *pool, unsigned long handle) + { + kmem_cache_free(pool->handle_cachep, (void *)handle); +@@ -373,12 +404,18 @@ + + static void record_obj(unsigned long handle, unsigned long obj) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ WRITE_ONCE(zh->addr, obj); ++#else + /* + * lsb of @obj represents handle lock while other bits + * represent object value the handle is pointing so + * updating shouldn't do store tearing. 
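
record_obj() below is where the two handle representations meet: mainline keeps the pin lock in a bit of the handle word itself (HANDLE_PIN_BIT, a bit spinlock), which RT cannot use because bit spinlocks always spin with preemption off. The enlarged zsmalloc_handle defined above therefore carries a real mutex, and the testpin/trypin/pin/unpin helpers that follow route through it. The substitution in miniature (rt_demo_* names invented; mutex_init() happens at allocation time, as in cache_alloc_handle() above):

    #include <linux/bit_spinlock.h>
    #include <linux/mutex.h>

    /* mainline: the lock is a bit of the word itself */
    static void demo_pin_mainline(unsigned long *word)
    {
            bit_spin_lock(0, word);      /* spins, preemption disabled */
            bit_spin_unlock(0, word);    /* lock/unlock shown together
                                            only for brevity */
    }

    /* -rt: give the object a real sleeping lock instead */
    struct rt_demo_handle {
            unsigned long addr;
            struct mutex  lock;
    };

    static void demo_pin_rt(struct rt_demo_handle *h)
    {
            mutex_lock(&h->lock);        /* preemptible, PI-aware on RT */
            mutex_unlock(&h->lock);
    }
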
+ */ + WRITE_ONCE(*(unsigned long *)handle, obj); ++#endif + } + + /* zpool driver */ +@@ -467,6 +504,7 @@ + + /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ + static DEFINE_PER_CPU(struct mapping_area, zs_map_area); ++static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock); + + static bool is_zspage_isolated(struct zspage *zspage) + { +@@ -902,7 +940,13 @@ + + static unsigned long handle_to_obj(unsigned long handle) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ return zh->addr; ++#else + return *(unsigned long *)handle; ++#endif + } + + static unsigned long obj_to_head(struct page *page, void *obj) +@@ -916,22 +960,46 @@ + + static inline int testpin_tag(unsigned long handle) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ return mutex_is_locked(&zh->lock); ++#else + return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle); ++#endif + } + + static inline int trypin_tag(unsigned long handle) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ return mutex_trylock(&zh->lock); ++#else + return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle); ++#endif + } + + static void pin_tag(unsigned long handle) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ return mutex_lock(&zh->lock); ++#else + bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle); ++#endif + } + + static void unpin_tag(unsigned long handle) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle); ++ ++ return mutex_unlock(&zh->lock); ++#else + bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle); ++#endif + } + + static void reset_page(struct page *page) +@@ -1423,7 +1491,7 @@ + class = pool->size_class[class_idx]; + off = (class->size * obj_idx) & ~PAGE_MASK; + +- area = &get_cpu_var(zs_map_area); ++ area = &get_locked_var(zs_map_area_lock, zs_map_area); + area->vm_mm = mm; + if (off + class->size <= PAGE_SIZE) { + /* this object is contained entirely within a page */ +@@ -1477,7 +1545,7 @@ + + __zs_unmap_object(area, pages, off, class->size); + } +- put_cpu_var(zs_map_area); ++ put_locked_var(zs_map_area_lock, zs_map_area); + + migrate_read_unlock(zspage); + unpin_tag(handle); +diff -Nur linux-4.9.28.orig/net/core/dev.c linux-4.9.28/net/core/dev.c +--- linux-4.9.28.orig/net/core/dev.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/core/dev.c 2017-05-19 03:37:25.214178037 +0200 +@@ -190,6 +190,7 @@ + static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); + + static seqcount_t devnet_rename_seq; ++static DEFINE_MUTEX(devnet_rename_mutex); + + static inline void dev_base_seq_inc(struct net *net) + { +@@ -211,14 +212,14 @@ + static inline void rps_lock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_lock(&sd->input_pkt_queue.lock); ++ raw_spin_lock(&sd->input_pkt_queue.raw_lock); + #endif + } + + static inline void rps_unlock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_unlock(&sd->input_pkt_queue.lock); ++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock); + #endif + } + +@@ -888,7 +889,8 @@ + strcpy(name, dev->name); + rcu_read_unlock(); + if (read_seqcount_retry(&devnet_rename_seq, seq)) { +- cond_resched(); ++ mutex_lock(&devnet_rename_mutex); ++ mutex_unlock(&devnet_rename_mutex); + goto retry; + } + +@@ -1157,20 +1159,17 @@ + if (dev->flags & IFF_UP) + return -EBUSY; + +- 
write_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); ++ __raw_write_seqcount_begin(&devnet_rename_seq); + +- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { +- write_seqcount_end(&devnet_rename_seq); +- return 0; +- } ++ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) ++ goto outunlock; + + memcpy(oldname, dev->name, IFNAMSIZ); + + err = dev_get_valid_name(net, dev, newname); +- if (err < 0) { +- write_seqcount_end(&devnet_rename_seq); +- return err; +- } ++ if (err < 0) ++ goto outunlock; + + if (oldname[0] && !strchr(oldname, '%')) + netdev_info(dev, "renamed from %s\n", oldname); +@@ -1183,11 +1182,12 @@ + if (ret) { + memcpy(dev->name, oldname, IFNAMSIZ); + dev->name_assign_type = old_assign_type; +- write_seqcount_end(&devnet_rename_seq); +- return ret; ++ err = ret; ++ goto outunlock; + } + +- write_seqcount_end(&devnet_rename_seq); ++ __raw_write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); + + netdev_adjacent_rename_links(dev, oldname); + +@@ -1208,7 +1208,8 @@ + /* err >= 0 after dev_alloc_name() or stores the first errno */ + if (err >= 0) { + err = ret; +- write_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); ++ __raw_write_seqcount_begin(&devnet_rename_seq); + memcpy(dev->name, oldname, IFNAMSIZ); + memcpy(oldname, newname, IFNAMSIZ); + dev->name_assign_type = old_assign_type; +@@ -1221,6 +1222,11 @@ + } + + return err; ++ ++outunlock: ++ __raw_write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); ++ return err; + } + + /** +@@ -2285,6 +2291,7 @@ + sd->output_queue_tailp = &q->next_sched; + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + void __netif_schedule(struct Qdisc *q) +@@ -2366,6 +2373,7 @@ + __this_cpu_write(softnet_data.completion_queue, skb); + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__dev_kfree_skb_irq); + +@@ -3100,7 +3108,11 @@ + * This permits qdisc->running owner to get the lock more + * often and dequeue packets faster. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ contended = true; ++#else + contended = qdisc_is_running(q); ++#endif + if (unlikely(contended)) + spin_lock(&q->busylock); + +@@ -3163,8 +3175,10 @@ + #define skb_update_prio(skb) + #endif + ++#ifndef CONFIG_PREEMPT_RT_FULL + DEFINE_PER_CPU(int, xmit_recursion); + EXPORT_SYMBOL(xmit_recursion); ++#endif + + /** + * dev_loopback_xmit - loop back @skb +@@ -3398,8 +3412,7 @@ + int cpu = smp_processor_id(); /* ok because BHs are off */ + + if (txq->xmit_lock_owner != cpu) { +- if (unlikely(__this_cpu_read(xmit_recursion) > +- XMIT_RECURSION_LIMIT)) ++ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) + goto recursion_alert; + + skb = validate_xmit_skb(skb, dev); +@@ -3409,9 +3422,9 @@ + HARD_TX_LOCK(dev, txq, cpu); + + if (!netif_xmit_stopped(txq)) { +- __this_cpu_inc(xmit_recursion); ++ xmit_rec_inc(); + skb = dev_hard_start_xmit(skb, dev, txq, &rc); +- __this_cpu_dec(xmit_recursion); ++ xmit_rec_dec(); + if (dev_xmit_complete(rc)) { + HARD_TX_UNLOCK(dev, txq); + goto out; +@@ -3785,6 +3798,7 @@ + rps_unlock(sd); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + + atomic_long_inc(&skb->dev->rx_dropped); + kfree_skb(skb); +@@ -3803,7 +3817,7 @@ + struct rps_dev_flow voidflow, *rflow = &voidflow; + int cpu; + +- preempt_disable(); ++ migrate_disable(); + rcu_read_lock(); + + cpu = get_rps_cpu(skb->dev, skb, &rflow); +@@ -3813,13 +3827,13 @@ + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + + rcu_read_unlock(); +- preempt_enable(); ++ migrate_enable(); + } else + #endif + { + unsigned int qtail; +- ret = enqueue_to_backlog(skb, get_cpu(), &qtail); +- put_cpu(); ++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); ++ put_cpu_light(); + } + return ret; + } +@@ -3853,11 +3867,9 @@ + + trace_netif_rx_ni_entry(skb); + +- preempt_disable(); ++ local_bh_disable(); + err = netif_rx_internal(skb); +- if (local_softirq_pending()) +- do_softirq(); +- preempt_enable(); ++ local_bh_enable(); + + return err; + } +@@ -4336,7 +4348,7 @@ + skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { + if (skb->dev->reg_state == NETREG_UNREGISTERING) { + __skb_unlink(skb, &sd->input_pkt_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } +@@ -4346,11 +4358,14 @@ + skb_queue_walk_safe(&sd->process_queue, skb, tmp) { + if (skb->dev->reg_state == NETREG_UNREGISTERING) { + __skb_unlink(skb, &sd->process_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } ++ if (!skb_queue_empty(&sd->tofree_queue)) ++ raise_softirq_irqoff(NET_RX_SOFTIRQ); + local_bh_enable(); ++ + } + + static void flush_all_backlogs(void) +@@ -4831,6 +4846,7 @@ + sd->rps_ipi_list = NULL; + + local_irq_enable(); ++ preempt_check_resched_rt(); + + /* Send pending IPI's to kick RPS processing on remote cpus. 
*/ + while (remsd) { +@@ -4844,6 +4860,7 @@ + } else + #endif + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) +@@ -4873,7 +4890,9 @@ + while (again) { + struct sk_buff *skb; + ++ local_irq_disable(); + while ((skb = __skb_dequeue(&sd->process_queue))) { ++ local_irq_enable(); + rcu_read_lock(); + __netif_receive_skb(skb); + rcu_read_unlock(); +@@ -4881,9 +4900,9 @@ + if (++work >= quota) + return work; + ++ local_irq_disable(); + } + +- local_irq_disable(); + rps_lock(sd); + if (skb_queue_empty(&sd->input_pkt_queue)) { + /* +@@ -4921,9 +4940,11 @@ + local_irq_save(flags); + ____napi_schedule(this_cpu_ptr(&softnet_data), n); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__napi_schedule); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * __napi_schedule_irqoff - schedule for receive + * @n: entry to schedule +@@ -4935,6 +4956,7 @@ + ____napi_schedule(this_cpu_ptr(&softnet_data), n); + } + EXPORT_SYMBOL(__napi_schedule_irqoff); ++#endif + + void __napi_complete(struct napi_struct *n) + { +@@ -5224,13 +5246,21 @@ + struct softnet_data *sd = this_cpu_ptr(&softnet_data); + unsigned long time_limit = jiffies + 2; + int budget = netdev_budget; ++ struct sk_buff_head tofree_q; ++ struct sk_buff *skb; + LIST_HEAD(list); + LIST_HEAD(repoll); + ++ __skb_queue_head_init(&tofree_q); ++ + local_irq_disable(); ++ skb_queue_splice_init(&sd->tofree_queue, &tofree_q); + list_splice_init(&sd->poll_list, &list); + local_irq_enable(); + ++ while ((skb = __skb_dequeue(&tofree_q))) ++ kfree_skb(skb); ++ + for (;;) { + struct napi_struct *n; + +@@ -5261,7 +5291,7 @@ + list_splice_tail(&repoll, &list); + list_splice(&list, &sd->poll_list); + if (!list_empty(&sd->poll_list)) +- __raise_softirq_irqoff(NET_RX_SOFTIRQ); ++ __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ); + + net_rps_action_and_irq_enable(sd); + } +@@ -8022,16 +8052,20 @@ + + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + + /* Process offline CPU's input_pkt_queue */ + while ((skb = __skb_dequeue(&oldsd->process_queue))) { + netif_rx_ni(skb); + input_queue_head_incr(oldsd); + } +- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { ++ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { + netif_rx_ni(skb); + input_queue_head_incr(oldsd); + } ++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { ++ kfree_skb(skb); ++ } + + return NOTIFY_OK; + } +@@ -8336,8 +8370,9 @@ + + INIT_WORK(flush, flush_backlog); + +- skb_queue_head_init(&sd->input_pkt_queue); +- skb_queue_head_init(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->input_pkt_queue); ++ skb_queue_head_init_raw(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->tofree_queue); + INIT_LIST_HEAD(&sd->poll_list); + sd->output_queue_tailp = &sd->output_queue; + #ifdef CONFIG_RPS +diff -Nur linux-4.9.28.orig/net/core/filter.c linux-4.9.28/net/core/filter.c +--- linux-4.9.28.orig/net/core/filter.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/core/filter.c 2017-05-19 03:37:25.214178037 +0200 +@@ -1645,7 +1645,7 @@ + { + int ret; + +- if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) { ++ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) { + net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); + kfree_skb(skb); + return -ENETDOWN; +@@ -1653,9 +1653,9 @@ + + skb->dev = dev; + +- __this_cpu_inc(xmit_recursion); ++ xmit_rec_inc(); + ret = dev_queue_xmit(skb); +- 
__this_cpu_dec(xmit_recursion); ++ xmit_rec_dec(); + + return ret; + } +diff -Nur linux-4.9.28.orig/net/core/gen_estimator.c linux-4.9.28/net/core/gen_estimator.c +--- linux-4.9.28.orig/net/core/gen_estimator.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/core/gen_estimator.c 2017-05-19 03:37:25.214178037 +0200 +@@ -84,7 +84,7 @@ + struct gnet_stats_basic_packed *bstats; + struct gnet_stats_rate_est64 *rate_est; + spinlock_t *stats_lock; +- seqcount_t *running; ++ net_seqlock_t *running; + int ewma_log; + u32 last_packets; + unsigned long avpps; +@@ -213,7 +213,7 @@ + struct gnet_stats_basic_cpu __percpu *cpu_bstats, + struct gnet_stats_rate_est64 *rate_est, + spinlock_t *stats_lock, +- seqcount_t *running, ++ net_seqlock_t *running, + struct nlattr *opt) + { + struct gen_estimator *est; +@@ -309,7 +309,7 @@ + struct gnet_stats_basic_cpu __percpu *cpu_bstats, + struct gnet_stats_rate_est64 *rate_est, + spinlock_t *stats_lock, +- seqcount_t *running, struct nlattr *opt) ++ net_seqlock_t *running, struct nlattr *opt) + { + gen_kill_estimator(bstats, rate_est); + return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt); +diff -Nur linux-4.9.28.orig/net/core/gen_stats.c linux-4.9.28/net/core/gen_stats.c +--- linux-4.9.28.orig/net/core/gen_stats.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/core/gen_stats.c 2017-05-19 03:37:25.214178037 +0200 +@@ -130,7 +130,7 @@ + } + + void +-__gnet_stats_copy_basic(const seqcount_t *running, ++__gnet_stats_copy_basic(net_seqlock_t *running, + struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b) +@@ -143,10 +143,10 @@ + } + do { + if (running) +- seq = read_seqcount_begin(running); ++ seq = net_seq_begin(running); + bstats->bytes = b->bytes; + bstats->packets = b->packets; +- } while (running && read_seqcount_retry(running, seq)); ++ } while (running && net_seq_retry(running, seq)); + } + EXPORT_SYMBOL(__gnet_stats_copy_basic); + +@@ -164,7 +164,7 @@ + * if the room in the socket buffer was not sufficient. 
+ */ + int +-gnet_stats_copy_basic(const seqcount_t *running, ++gnet_stats_copy_basic(net_seqlock_t *running, + struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b) +diff -Nur linux-4.9.28.orig/net/core/skbuff.c linux-4.9.28/net/core/skbuff.c +--- linux-4.9.28.orig/net/core/skbuff.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/core/skbuff.c 2017-05-19 03:37:25.218178191 +0200 +@@ -64,6 +64,7 @@ + #include <linux/errqueue.h> + #include <linux/prefetch.h> + #include <linux/if_vlan.h> ++#include <linux/locallock.h> + + #include <net/protocol.h> + #include <net/dst.h> +@@ -360,6 +361,8 @@ + + static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); + static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); ++static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); ++static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock); + + static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) + { +@@ -367,10 +370,10 @@ + unsigned long flags; + void *data; + +- local_irq_save(flags); ++ local_lock_irqsave(netdev_alloc_lock, flags); + nc = this_cpu_ptr(&netdev_alloc_cache); + data = __alloc_page_frag(nc, fragsz, gfp_mask); +- local_irq_restore(flags); ++ local_unlock_irqrestore(netdev_alloc_lock, flags); + return data; + } + +@@ -389,9 +392,13 @@ + + static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) + { +- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct napi_alloc_cache *nc; ++ void *data; + +- return __alloc_page_frag(&nc->page, fragsz, gfp_mask); ++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); ++ data = __alloc_page_frag(&nc->page, fragsz, gfp_mask); ++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); ++ return data; + } + + void *napi_alloc_frag(unsigned int fragsz) +@@ -438,13 +445,13 @@ + if (sk_memalloc_socks()) + gfp_mask |= __GFP_MEMALLOC; + +- local_irq_save(flags); ++ local_lock_irqsave(netdev_alloc_lock, flags); + + nc = this_cpu_ptr(&netdev_alloc_cache); + data = __alloc_page_frag(nc, len, gfp_mask); + pfmemalloc = nc->pfmemalloc; + +- local_irq_restore(flags); ++ local_unlock_irqrestore(netdev_alloc_lock, flags); + + if (unlikely(!data)) + return NULL; +@@ -485,9 +492,10 @@ + struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, + gfp_t gfp_mask) + { +- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct napi_alloc_cache *nc; + struct sk_buff *skb; + void *data; ++ bool pfmemalloc; + + len += NET_SKB_PAD + NET_IP_ALIGN; + +@@ -505,7 +513,10 @@ + if (sk_memalloc_socks()) + gfp_mask |= __GFP_MEMALLOC; + ++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + data = __alloc_page_frag(&nc->page, len, gfp_mask); ++ pfmemalloc = nc->page.pfmemalloc; ++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + if (unlikely(!data)) + return NULL; + +@@ -516,7 +527,7 @@ + } + + /* use OR instead of assignment to avoid clearing of bits in mask */ +- if (nc->page.pfmemalloc) ++ if (pfmemalloc) + skb->pfmemalloc = 1; + skb->head_frag = 1; + +@@ -760,23 +771,26 @@ + + void __kfree_skb_flush(void) + { +- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct napi_alloc_cache *nc; + ++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + /* flush skb_cache if containing objects */ + if (nc->skb_count) { + kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count, + nc->skb_cache); + nc->skb_count = 0; + } ++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + } + + 
static inline void _kfree_skb_defer(struct sk_buff *skb) + { +- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct napi_alloc_cache *nc; + + /* drop skb->head and call any destructors for packet */ + skb_release_all(skb); + ++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + /* record skb to CPU local list */ + nc->skb_cache[nc->skb_count++] = skb; + +@@ -791,6 +805,7 @@ + nc->skb_cache); + nc->skb_count = 0; + } ++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + } + void __kfree_skb_defer(struct sk_buff *skb) + { +diff -Nur linux-4.9.28.orig/net/core/sock.c linux-4.9.28/net/core/sock.c +--- linux-4.9.28.orig/net/core/sock.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/core/sock.c 2017-05-19 03:37:25.218178191 +0200 +@@ -2499,12 +2499,11 @@ + if (sk->sk_lock.owned) + __lock_sock(sk); + sk->sk_lock.owned = 1; +- spin_unlock(&sk->sk_lock.slock); ++ spin_unlock_bh(&sk->sk_lock.slock); + /* + * The sk_lock has mutex_lock() semantics here: + */ + mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); +- local_bh_enable(); + } + EXPORT_SYMBOL(lock_sock_nested); + +diff -Nur linux-4.9.28.orig/net/ipv4/icmp.c linux-4.9.28/net/ipv4/icmp.c +--- linux-4.9.28.orig/net/ipv4/icmp.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/ipv4/icmp.c 2017-05-19 03:37:25.218178191 +0200 +@@ -69,6 +69,7 @@ + #include <linux/jiffies.h> + #include <linux/kernel.h> + #include <linux/fcntl.h> ++#include <linux/sysrq.h> + #include <linux/socket.h> + #include <linux/in.h> + #include <linux/inet.h> +@@ -77,6 +78,7 @@ + #include <linux/string.h> + #include <linux/netfilter_ipv4.h> + #include <linux/slab.h> ++#include <linux/locallock.h> + #include <net/snmp.h> + #include <net/ip.h> + #include <net/route.h> +@@ -204,6 +206,8 @@ + * + * On SMP we have one ICMP socket per-cpu. + */ ++static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock); ++ + static struct sock *icmp_sk(struct net *net) + { + return *this_cpu_ptr(net->ipv4.icmp_sk); +@@ -215,12 +219,14 @@ + + local_bh_disable(); + ++ local_lock(icmp_sk_lock); + sk = icmp_sk(net); + + if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { + /* This can happen if the output path signals a + * dst_link_failure() for an outgoing ICMP packet. + */ ++ local_unlock(icmp_sk_lock); + local_bh_enable(); + return NULL; + } +@@ -230,6 +236,7 @@ + static inline void icmp_xmit_unlock(struct sock *sk) + { + spin_unlock_bh(&sk->sk_lock.slock); ++ local_unlock(icmp_sk_lock); + } + + int sysctl_icmp_msgs_per_sec __read_mostly = 1000; +@@ -358,6 +365,7 @@ + struct sock *sk; + struct sk_buff *skb; + ++ local_lock(icmp_sk_lock); + sk = icmp_sk(dev_net((*rt)->dst.dev)); + if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param, + icmp_param->data_len+icmp_param->head_len, +@@ -380,6 +388,7 @@ + skb->ip_summed = CHECKSUM_NONE; + ip_push_pending_frames(sk, fl4); + } ++ local_unlock(icmp_sk_lock); + } + + /* +@@ -891,6 +900,30 @@ + } + + /* ++ * 32bit and 64bit have different timestamp length, so we check for ++ * the cookie at offset 20 and verify it is repeated at offset 50 ++ */ ++#define CO_POS0 20 ++#define CO_POS1 50 ++#define CO_SIZE sizeof(int) ++#define ICMP_SYSRQ_SIZE 57 ++ ++/* ++ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie ++ * pattern and if it matches send the next byte as a trigger to sysrq. 
++ */ ++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) ++{ ++ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); ++ char *p = skb->data; ++ ++ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && ++ !memcmp(&cookie, p + CO_POS1, CO_SIZE) && ++ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) ++ handle_sysrq(p[CO_POS0 + CO_SIZE]); ++} ++ ++/* + * Handle ICMP_ECHO ("ping") requests. + * + * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo +@@ -917,6 +950,11 @@ + icmp_param.data_len = skb->len; + icmp_param.head_len = sizeof(struct icmphdr); + icmp_reply(&icmp_param, skb); ++ ++ if (skb->len == ICMP_SYSRQ_SIZE && ++ net->ipv4.sysctl_icmp_echo_sysrq) { ++ icmp_check_sysrq(net, skb); ++ } + } + /* should there be an ICMP stat for ignored echos? */ + return true; +diff -Nur linux-4.9.28.orig/net/ipv4/sysctl_net_ipv4.c linux-4.9.28/net/ipv4/sysctl_net_ipv4.c +--- linux-4.9.28.orig/net/ipv4/sysctl_net_ipv4.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/ipv4/sysctl_net_ipv4.c 2017-05-19 03:37:25.218178191 +0200 +@@ -681,6 +681,13 @@ + .proc_handler = proc_dointvec + }, + { ++ .procname = "icmp_echo_sysrq", ++ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec ++ }, ++ { + .procname = "icmp_ignore_bogus_error_responses", + .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, + .maxlen = sizeof(int), +diff -Nur linux-4.9.28.orig/net/ipv4/tcp_ipv4.c linux-4.9.28/net/ipv4/tcp_ipv4.c +--- linux-4.9.28.orig/net/ipv4/tcp_ipv4.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/ipv4/tcp_ipv4.c 2017-05-19 03:37:25.218178191 +0200 +@@ -62,6 +62,7 @@ + #include <linux/init.h> + #include <linux/times.h> + #include <linux/slab.h> ++#include <linux/locallock.h> + + #include <net/net_namespace.h> + #include <net/icmp.h> +@@ -568,6 +569,7 @@ + } + EXPORT_SYMBOL(tcp_v4_send_check); + ++static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock); + /* + * This routine will send an RST to the other tcp. 
+ * +@@ -695,6 +697,8 @@ + offsetof(struct inet_timewait_sock, tw_bound_dev_if)); + + arg.tos = ip_hdr(skb)->tos; ++ ++ local_lock(tcp_sk_lock); + local_bh_disable(); + ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), + skb, &TCP_SKB_CB(skb)->header.h4.opt, +@@ -704,6 +708,7 @@ + __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); + __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); + local_bh_enable(); ++ local_unlock(tcp_sk_lock); + + #ifdef CONFIG_TCP_MD5SIG + out: +@@ -779,6 +784,7 @@ + if (oif) + arg.bound_dev_if = oif; + arg.tos = tos; ++ local_lock(tcp_sk_lock); + local_bh_disable(); + ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), + skb, &TCP_SKB_CB(skb)->header.h4.opt, +@@ -787,6 +793,7 @@ + + __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); + local_bh_enable(); ++ local_unlock(tcp_sk_lock); + } + + static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) +diff -Nur linux-4.9.28.orig/net/mac80211/rx.c linux-4.9.28/net/mac80211/rx.c +--- linux-4.9.28.orig/net/mac80211/rx.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/mac80211/rx.c 2017-05-19 03:37:25.218178191 +0200 +@@ -4230,7 +4230,7 @@ + struct ieee80211_supported_band *sband; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + +- WARN_ON_ONCE(softirq_count() == 0); ++ WARN_ON_ONCE_NONRT(softirq_count() == 0); + + if (WARN_ON(status->band >= NUM_NL80211_BANDS)) + goto drop; +diff -Nur linux-4.9.28.orig/net/netfilter/core.c linux-4.9.28/net/netfilter/core.c +--- linux-4.9.28.orig/net/netfilter/core.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/netfilter/core.c 2017-05-19 03:37:25.218178191 +0200 +@@ -22,12 +22,18 @@ + #include <linux/proc_fs.h> + #include <linux/mutex.h> + #include <linux/slab.h> ++#include <linux/locallock.h> + #include <linux/rcupdate.h> + #include <net/net_namespace.h> + #include <net/sock.h> + + #include "nf_internals.h" + ++#ifdef CONFIG_PREEMPT_RT_BASE ++DEFINE_LOCAL_IRQ_LOCK(xt_write_lock); ++EXPORT_PER_CPU_SYMBOL(xt_write_lock); ++#endif ++ + static DEFINE_MUTEX(afinfo_mutex); + + const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; +diff -Nur linux-4.9.28.orig/net/packet/af_packet.c linux-4.9.28/net/packet/af_packet.c +--- linux-4.9.28.orig/net/packet/af_packet.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/packet/af_packet.c 2017-05-19 03:37:25.218178191 +0200 +@@ -63,6 +63,7 @@ + #include <linux/if_packet.h> + #include <linux/wireless.h> + #include <linux/kernel.h> ++#include <linux/delay.h> + #include <linux/kmod.h> + #include <linux/slab.h> + #include <linux/vmalloc.h> +@@ -694,7 +695,7 @@ + if (BLOCK_NUM_PKTS(pbd)) { + while (atomic_read(&pkc->blk_fill_in_prog)) { + /* Waiting for skb_copy_bits to finish... */ +- cpu_relax(); ++ cpu_chill(); + } + } + +@@ -956,7 +957,7 @@ + if (!(status & TP_STATUS_BLK_TMO)) { + while (atomic_read(&pkc->blk_fill_in_prog)) { + /* Waiting for skb_copy_bits to finish... 
*/ +- cpu_relax(); ++ cpu_chill(); + } + } + prb_close_block(pkc, pbd, po, status); +diff -Nur linux-4.9.28.orig/net/rds/ib_rdma.c linux-4.9.28/net/rds/ib_rdma.c +--- linux-4.9.28.orig/net/rds/ib_rdma.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/rds/ib_rdma.c 2017-05-19 03:37:25.218178191 +0200 +@@ -34,6 +34,7 @@ + #include <linux/slab.h> + #include <linux/rculist.h> + #include <linux/llist.h> ++#include <linux/delay.h> + + #include "rds_single_path.h" + #include "ib_mr.h" +@@ -210,7 +211,7 @@ + for_each_online_cpu(cpu) { + flag = &per_cpu(clean_list_grace, cpu); + while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) +- cpu_relax(); ++ cpu_chill(); + } + } + +diff -Nur linux-4.9.28.orig/net/rxrpc/security.c linux-4.9.28/net/rxrpc/security.c +--- linux-4.9.28.orig/net/rxrpc/security.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/rxrpc/security.c 2017-05-19 03:37:25.218178191 +0200 +@@ -19,9 +19,6 @@ + #include <keys/rxrpc-type.h> + #include "ar-internal.h" + +-static LIST_HEAD(rxrpc_security_methods); +-static DECLARE_RWSEM(rxrpc_security_sem); +- + static const struct rxrpc_security *rxrpc_security_types[] = { + [RXRPC_SECURITY_NONE] = &rxrpc_no_security, + #ifdef CONFIG_RXKAD +diff -Nur linux-4.9.28.orig/net/sched/sch_api.c linux-4.9.28/net/sched/sch_api.c +--- linux-4.9.28.orig/net/sched/sch_api.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/sched/sch_api.c 2017-05-19 03:37:25.218178191 +0200 +@@ -981,7 +981,7 @@ + rcu_assign_pointer(sch->stab, stab); + } + if (tca[TCA_RATE]) { +- seqcount_t *running; ++ net_seqlock_t *running; + + err = -EOPNOTSUPP; + if (sch->flags & TCQ_F_MQROOT) +diff -Nur linux-4.9.28.orig/net/sched/sch_generic.c linux-4.9.28/net/sched/sch_generic.c +--- linux-4.9.28.orig/net/sched/sch_generic.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/sched/sch_generic.c 2017-05-19 03:37:25.218178191 +0200 +@@ -425,7 +425,11 @@ + .ops = &noop_qdisc_ops, + .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), + .dev_queue = &noop_netdev_queue, ++#ifdef CONFIG_PREEMPT_RT_BASE ++ .running = __SEQLOCK_UNLOCKED(noop_qdisc.running), ++#else + .running = SEQCNT_ZERO(noop_qdisc.running), ++#endif + .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), + }; + EXPORT_SYMBOL(noop_qdisc); +@@ -624,9 +628,17 @@ + lockdep_set_class(&sch->busylock, + dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++ seqlock_init(&sch->running); ++ lockdep_set_class(&sch->running.seqcount, ++ dev->qdisc_running_key ?: &qdisc_running_key); ++ lockdep_set_class(&sch->running.lock, ++ dev->qdisc_running_key ?: &qdisc_running_key); ++#else + seqcount_init(&sch->running); + lockdep_set_class(&sch->running, + dev->qdisc_running_key ?: &qdisc_running_key); ++#endif + + sch->ops = ops; + sch->enqueue = ops->enqueue; +@@ -925,7 +937,7 @@ + /* Wait for outstanding qdisc_run calls. 
*/ + list_for_each_entry(dev, head, close_list) + while (some_qdisc_is_busy(dev)) +- yield(); ++ msleep(1); + } + + void dev_deactivate(struct net_device *dev) +diff -Nur linux-4.9.28.orig/net/sunrpc/svc_xprt.c linux-4.9.28/net/sunrpc/svc_xprt.c +--- linux-4.9.28.orig/net/sunrpc/svc_xprt.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/net/sunrpc/svc_xprt.c 2017-05-19 03:37:25.218178191 +0200 +@@ -396,7 +396,7 @@ + goto out; + } + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + pool = svc_pool_for_cpu(xprt->xpt_server, cpu); + + atomic_long_inc(&pool->sp_stats.packets); +@@ -432,7 +432,7 @@ + + atomic_long_inc(&pool->sp_stats.threads_woken); + wake_up_process(rqstp->rq_task); +- put_cpu(); ++ put_cpu_light(); + goto out; + } + rcu_read_unlock(); +@@ -453,7 +453,7 @@ + goto redo_search; + } + rqstp = NULL; +- put_cpu(); ++ put_cpu_light(); + out: + trace_svc_xprt_do_enqueue(xprt, rqstp); + } +diff -Nur linux-4.9.28.orig/scripts/mkcompile_h linux-4.9.28/scripts/mkcompile_h +--- linux-4.9.28.orig/scripts/mkcompile_h 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/scripts/mkcompile_h 2017-05-19 03:37:25.218178191 +0200 +@@ -4,7 +4,8 @@ + ARCH=$2 + SMP=$3 + PREEMPT=$4 +-CC=$5 ++RT=$5 ++CC=$6 + + vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } + +@@ -57,6 +58,7 @@ + CONFIG_FLAGS="" + if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi + if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi ++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi + UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" + + # Truncate to maximum length +diff -Nur linux-4.9.28.orig/sound/core/pcm_native.c linux-4.9.28/sound/core/pcm_native.c +--- linux-4.9.28.orig/sound/core/pcm_native.c 2017-05-14 14:00:37.000000000 +0200 ++++ linux-4.9.28/sound/core/pcm_native.c 2017-05-19 03:37:25.222178345 +0200 +@@ -135,7 +135,7 @@ + void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) + { + if (!substream->pcm->nonatomic) +- local_irq_disable(); ++ local_irq_disable_nort(); + snd_pcm_stream_lock(substream); + } + EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq); +@@ -150,7 +150,7 @@ + { + snd_pcm_stream_unlock(substream); + if (!substream->pcm->nonatomic) +- local_irq_enable(); ++ local_irq_enable_nort(); + } + EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq); + +@@ -158,7 +158,7 @@ + { + unsigned long flags = 0; + if (!substream->pcm->nonatomic) +- local_irq_save(flags); ++ local_irq_save_nort(flags); + snd_pcm_stream_lock(substream); + return flags; + } +@@ -176,7 +176,7 @@ + { + snd_pcm_stream_unlock(substream); + if (!substream->pcm->nonatomic) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore); +
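
The conversions above in mm/zsmalloc.c, mm/workingset.c, net/core/skbuff.c, net/ipv4/icmp.c and net/ipv4/tcp_ipv4.c all apply one recurring construct: a critical section that used local_irq_save()/local_irq_disable() or get_cpu_var() purely for per-CPU exclusion is placed under a named "local lock" instead. The patch's real implementation lives in include/linux/locallock.h, added in an earlier part of this diff; the fragment below is only a simplified model of that API, written to show why the substitution preserves semantics. Without RT the lock collapses back to plain IRQ disabling; on PREEMPT_RT_FULL it becomes a per-CPU sleeping spinlock, so the section stays serialized per CPU but no longer runs with interrupts hard-off.

/*
 * Simplified model of <linux/locallock.h> -- illustration only; the
 * real header also handles nesting, owner tracking and lockdep keys.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
struct local_irq_lock {
        spinlock_t lock;                /* an RT spinlock: it may sleep */
};

#define DEFINE_LOCAL_IRQ_LOCK(lvar)                                     \
        DEFINE_PER_CPU(struct local_irq_lock, lvar)

/* On RT, IRQs stay enabled; exclusion comes from the per-CPU lock,
 * and migrate_disable() keeps the task on this CPU while holding it. */
#define local_lock_irqsave(lvar, flags)                                 \
        do {                                                            \
                (void)(flags);                                          \
                migrate_disable();                                      \
                spin_lock(&this_cpu_ptr(&lvar)->lock);                  \
        } while (0)

#define local_unlock_irqrestore(lvar, flags)                            \
        do {                                                            \
                spin_unlock(&this_cpu_ptr(&lvar)->lock);                \
                migrate_enable();                                       \
        } while (0)
#else
/* !RT: the named lock is a dummy and the old IRQ discipline is kept. */
#define DEFINE_LOCAL_IRQ_LOCK(lvar)             __typeof__(const int) lvar
#define local_lock_irqsave(lvar, flags)         local_irq_save(flags)
#define local_unlock_irqrestore(lvar, flags)    local_irq_restore(flags)
#endif

local_lock()/local_unlock() and get_locked_var()/put_locked_var(), as used in the zsmalloc and icmp hunks, are the IRQ-agnostic variants of the same idea.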
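
The af_packet.c and ib_rdma.c hunks replace cpu_relax() busy-wait loops with cpu_chill(). On PREEMPT_RT the writer that will clear blk_fill_in_prog (or CLEAN_LIST_BUSY_BIT) runs preemptibly, so a higher-priority task spinning on the flag can starve it indefinitely; sleeping briefly lets the preempted party make progress. The patch defines cpu_chill() in its hrtimer changes, which are outside this excerpt; the function below, with a hypothetical name, is an approximation under that assumption, not the verbatim code.

/*
 * Approximation of the RT patch's cpu_chill(): block for about 1ms
 * instead of burning cycles, so a preempted holder of the awaited
 * resource gets CPU time to release it. Kernel context assumed
 * (<linux/hrtimer.h>, <linux/sched.h>).
 */
static void cpu_chill_sketch(void)
{
        ktime_t chill_time = ktime_set(0, NSEC_PER_MSEC);

        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL);
}

On kernels without PREEMPT_RT_BASE, cpu_chill() is defined back to cpu_relax(), so these hunks are behaviorally neutral there.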
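
The gen_estimator.c, gen_stats.c, sch_api.c and sch_generic.c hunks retype the qdisc "running" seqcount_t as net_seqlock_t and read it via net_seq_begin()/net_seq_retry(). A bare seqcount is only sound if the write side cannot be preempted mid-update; on RT the qdisc owner is preemptible, so a reader could spin in the retry loop against a writer that never gets to finish. Judging from these accessors and from the __SEQLOCK_UNLOCKED()/seqlock_init() usage in the sch_generic.c hunk, the wrapper header added elsewhere in this patch is plausibly:

/* Presumed shape of include/net/net_seq_lock.h (added by this patch,
 * not shown in this excerpt). */
#ifdef CONFIG_PREEMPT_RT_BASE
# define net_seqlock_t                  seqlock_t
# define net_seq_begin(__r)             read_seqbegin(__r)
# define net_seq_retry(__r, __s)        read_seqretry(__r, __s)
#else
# define net_seqlock_t                  seqcount_t
# define net_seq_begin(__r)             read_seqcount_begin(__r)
# define net_seq_retry(__r, __s)        read_seqcount_retry(__r, __s)
#endif

With a full seqlock the write side holds a proper lock, which on RT is a sleeping, priority-inheritance-capable lock, so a preempted writer can be made to run rather than stalling readers.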
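
In net/core/dev.c and net/core/filter.c the xmit_recursion accounting moves from raw __this_cpu_read()/__this_cpu_inc()/__this_cpu_dec() to xmit_rec_read()/xmit_rec_inc()/xmit_rec_dec(). The per-CPU counter stops being adequate because on RT the transmit path runs preemptibly: two senders can interleave on one CPU and a per-CPU depth would conflate their recursion levels. The helpers are defined in the include/linux/netdevice.h part of this patch, not in this excerpt; their presumed shape is:

/* Presumed xmit_rec_*() helpers: per-task depth on RT (using an
 * xmit_recursion field this patch adds to task_struct), the mainline
 * per-CPU counter otherwise. */
#ifdef CONFIG_PREEMPT_RT_FULL
static inline int  xmit_rec_read(void) { return current->xmit_recursion; }
static inline void xmit_rec_inc(void)  { current->xmit_recursion++; }
static inline void xmit_rec_dec(void)  { current->xmit_recursion--; }
#else
DECLARE_PER_CPU(int, xmit_recursion);
static inline int  xmit_rec_read(void) { return __this_cpu_read(xmit_recursion); }
static inline void xmit_rec_inc(void)  { __this_cpu_inc(xmit_recursion); }
static inline void xmit_rec_dec(void)  { __this_cpu_dec(xmit_recursion); }
#endif

This is also why the DEFINE_PER_CPU(int, xmit_recursion) definition in dev.c is compiled out under CONFIG_PREEMPT_RT_FULL in the hunk above.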
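
Many of the dev.c hunks append preempt_check_resched_rt() right after a local_irq_restore()/local_irq_enable() that follows raise_softirq_irqoff(). On RT, raising a softirq can mean waking the softirq thread; if that wakeup sets NEED_RESCHED while interrupts (and with them preemption) are off, nothing would act on it when leaving the IRQs-off region. The helper is defined in the preempt.h part of the patch, not shown here; it presumably folds the pending reschedule in at exactly that point:

/* Assumed definition, from the include/linux/preempt.h hunk of this
 * patch: act on a reschedule made pending while IRQs were off. */
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_check_resched_rt()     preempt_check_resched()
#else
# define preempt_check_resched_rt()     barrier()
#endif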
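
Finally, the svc_xprt.c hunk swaps get_cpu()/put_cpu() for get_cpu_light()/put_cpu_light(). get_cpu() disables preemption, but svc_xprt_do_enqueue() goes on to take pool locks that become sleeping locks on RT, which is illegal inside a non-preemptible section. The _light variants, from the include/linux/smp.h portion of this patch (outside this excerpt), pin the task to its CPU without disabling preemption; presumably:

/* Presumed definition from this patch's include/linux/smp.h hunk:
 * stay on this CPU via migrate_disable(), but remain preemptible so
 * sleeping locks stay legal inside the section. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()        ({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()        migrate_enable()
#else
# define get_cpu_light()        get_cpu()
# define put_cpu_light()        put_cpu()
#endif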