summaryrefslogtreecommitdiff
path: root/target/linux
diff options
context:
space:
mode:
authorWaldemar Brodkorb <wbx@openadk.org>2015-06-01 15:19:39 -0500
committerWaldemar Brodkorb <wbx@openadk.org>2015-06-01 15:21:28 -0500
commitebd9559f7c9255f59859b394efa08b76fcdd0364 (patch)
tree8f119b6d588ee1dbb28026d13a44b2e78ccbfa17 /target/linux
parent732a5c76d6a674a7ba79cdc90366b6a4f521389f (diff)
add realtime patch for 3.14
Diffstat (limited to 'target/linux')
-rw-r--r--target/linux/config/Config.in.kernel5
-rw-r--r--target/linux/patches/3.14.43/realtime.patch25916
2 files changed, 25919 insertions, 2 deletions
diff --git a/target/linux/config/Config.in.kernel b/target/linux/config/Config.in.kernel
index 296dba2fc..2c360d3eb 100644
--- a/target/linux/config/Config.in.kernel
+++ b/target/linux/config/Config.in.kernel
@@ -275,9 +275,10 @@ config ADK_KERNEL_HIGH_RES_TIMERS
comment "uses experimental external patch"
config ADK_KERNEL_PREEMPT_RT_FULL
bool "Enable Realtime support"
- depends on ADK_KERNEL_VERSION_3_18
+ depends on ADK_KERNEL_VERSION_3_18 \
+ || ADK_KERNEL_VERSION_3_14
help
- https://www.kernel.org/pub/linux/kernel/projects/rt/3.18/
+ https://www.kernel.org/pub/linux/kernel/projects/rt/
choice
prompt "Page size"
diff --git a/target/linux/patches/3.14.43/realtime.patch b/target/linux/patches/3.14.43/realtime.patch
new file mode 100644
index 000000000..bc38c7966
--- /dev/null
+++ b/target/linux/patches/3.14.43/realtime.patch
@@ -0,0 +1,25916 @@
+diff -Nur linux-3.14.43.orig/arch/alpha/mm/fault.c linux-3.14.43/arch/alpha/mm/fault.c
+--- linux-3.14.43.orig/arch/alpha/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/alpha/mm/fault.c 2015-05-31 15:35:42.433633774 -0500
+@@ -107,7 +107,7 @@
+
+ /* If we're in an interrupt context, or have no user context,
+ we must not take the fault. */
+- if (!mm || in_atomic())
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ #ifdef CONFIG_ALPHA_LARGE_VMALLOC
+diff -Nur linux-3.14.43.orig/arch/arm/include/asm/cmpxchg.h linux-3.14.43/arch/arm/include/asm/cmpxchg.h
+--- linux-3.14.43.orig/arch/arm/include/asm/cmpxchg.h 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/include/asm/cmpxchg.h 2015-05-31 15:35:42.437633774 -0500
+@@ -127,6 +127,8 @@
+
+ #else /* min ARCH >= ARMv6 */
+
++#define __HAVE_ARCH_CMPXCHG 1
++
+ extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+ /*
+diff -Nur linux-3.14.43.orig/arch/arm/include/asm/futex.h linux-3.14.43/arch/arm/include/asm/futex.h
+--- linux-3.14.43.orig/arch/arm/include/asm/futex.h 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/include/asm/futex.h 2015-05-31 15:35:42.437633774 -0500
+@@ -90,6 +90,8 @@
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
++ preempt_disable_rt();
++
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: " TUSER(ldr) " %1, [%4]\n"
+ " teq %1, %2\n"
+@@ -101,6 +103,8 @@
+ : "cc", "memory");
+
+ *uval = val;
++
++ preempt_enable_rt();
+ return ret;
+ }
+
+diff -Nur linux-3.14.43.orig/arch/arm/include/asm/switch_to.h linux-3.14.43/arch/arm/include/asm/switch_to.h
+--- linux-3.14.43.orig/arch/arm/include/asm/switch_to.h 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/include/asm/switch_to.h 2015-05-31 15:35:42.437633774 -0500
+@@ -3,6 +3,13 @@
+
+ #include <linux/thread_info.h>
+
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+ /*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+@@ -22,6 +29,7 @@
+
+ #define switch_to(prev,next,last) \
+ do { \
++ switch_kmaps(prev, next); \
+ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
+ } while (0)
+
+diff -Nur linux-3.14.43.orig/arch/arm/include/asm/thread_info.h linux-3.14.43/arch/arm/include/asm/thread_info.h
+--- linux-3.14.43.orig/arch/arm/include/asm/thread_info.h 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/include/asm/thread_info.h 2015-05-31 15:35:42.757633771 -0500
+@@ -50,6 +50,7 @@
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+@@ -142,6 +143,7 @@
+ #define TIF_SIGPENDING 0
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
++#define TIF_NEED_RESCHED_LAZY 3
+ #define TIF_SYSCALL_TRACE 8
+ #define TIF_SYSCALL_AUDIT 9
+ #define TIF_SYSCALL_TRACEPOINT 10
+@@ -154,6 +156,7 @@
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+diff -Nur linux-3.14.43.orig/arch/arm/Kconfig linux-3.14.43/arch/arm/Kconfig
+--- linux-3.14.43.orig/arch/arm/Kconfig 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/Kconfig 2015-05-31 15:35:42.437633774 -0500
+@@ -59,6 +59,7 @@
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_UID16
+diff -Nur linux-3.14.43.orig/arch/arm/kernel/asm-offsets.c linux-3.14.43/arch/arm/kernel/asm-offsets.c
+--- linux-3.14.43.orig/arch/arm/kernel/asm-offsets.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/kernel/asm-offsets.c 2015-05-31 15:35:42.757633771 -0500
+@@ -54,6 +54,7 @@
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+diff -Nur linux-3.14.43.orig/arch/arm/kernel/entry-armv.S linux-3.14.43/arch/arm/kernel/entry-armv.S
+--- linux-3.14.43.orig/arch/arm/kernel/entry-armv.S 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/kernel/entry-armv.S 2015-05-31 15:35:42.757633771 -0500
+@@ -205,11 +205,18 @@
+ #ifdef CONFIG_PREEMPT
+ get_thread_info tsk
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+- ldr r0, [tsk, #TI_FLAGS] @ get flags
+ teq r8, #0 @ if preempt count != 0
+	bne	1f				@ return from exception
++ ldr r0, [tsk, #TI_FLAGS] @ get flags
++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
++ blne svc_preempt @ preempt!
++
++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r8, #0 @ if preempt lazy count != 0
+ movne r0, #0 @ force flags to 0
+- tst r0, #_TIF_NEED_RESCHED
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ blne svc_preempt
++1:
+ #endif
+
+ svc_exit r5, irq = 1 @ return from exception
+@@ -224,6 +231,8 @@
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
++ bne 1b
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ moveq pc, r8 @ go again
+ b 1b
+ #endif
+diff -Nur linux-3.14.43.orig/arch/arm/kernel/process.c linux-3.14.43/arch/arm/kernel/process.c
+--- linux-3.14.43.orig/arch/arm/kernel/process.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/kernel/process.c 2015-05-31 15:35:42.761633771 -0500
+@@ -434,6 +434,30 @@
+ }
+
+ #ifdef CONFIG_MMU
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++ struct page *page;
++ unsigned long addr = 0xffff0000;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ page = pmd_page(*(pmd));
++
++ pgtable_page_ctor(page);
++
++ return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
++
+ #ifdef CONFIG_KUSER_HELPERS
+ /*
+ * The vectors page is always readable from user space for the
+diff -Nur linux-3.14.43.orig/arch/arm/kernel/signal.c linux-3.14.43/arch/arm/kernel/signal.c
+--- linux-3.14.43.orig/arch/arm/kernel/signal.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/kernel/signal.c 2015-05-31 15:35:42.761633771 -0500
+@@ -573,7 +573,8 @@
+ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ {
+ do {
+- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ if (likely(thread_flags & (_TIF_NEED_RESCHED |
++ _TIF_NEED_RESCHED_LAZY))) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+diff -Nur linux-3.14.43.orig/arch/arm/kernel/unwind.c linux-3.14.43/arch/arm/kernel/unwind.c
+--- linux-3.14.43.orig/arch/arm/kernel/unwind.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/kernel/unwind.c 2015-05-31 15:35:42.761633771 -0500
+@@ -87,7 +87,7 @@
+ static const struct unwind_idx *__origin_unwind_idx;
+ extern const struct unwind_idx __stop_unwind_idx[];
+
+-static DEFINE_SPINLOCK(unwind_lock);
++static DEFINE_RAW_SPINLOCK(unwind_lock);
+ static LIST_HEAD(unwind_tables);
+
+ /* Convert a prel31 symbol to an absolute address */
+@@ -195,7 +195,7 @@
+ /* module unwind tables */
+ struct unwind_table *table;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_for_each_entry(table, &unwind_tables, list) {
+ if (addr >= table->begin_addr &&
+ addr < table->end_addr) {
+@@ -207,7 +207,7 @@
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ }
+
+ pr_debug("%s: idx = %p\n", __func__, idx);
+@@ -469,9 +469,9 @@
+ tab->begin_addr = text_addr;
+ tab->end_addr = text_addr + text_size;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_add_tail(&tab->list, &unwind_tables);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ return tab;
+ }
+@@ -483,9 +483,9 @@
+ if (!tab)
+ return;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_del(&tab->list);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ kfree(tab);
+ }
+diff -Nur linux-3.14.43.orig/arch/arm/kvm/arm.c linux-3.14.43/arch/arm/kvm/arm.c
+--- linux-3.14.43.orig/arch/arm/kvm/arm.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/kvm/arm.c 2015-05-31 15:35:42.761633771 -0500
+@@ -495,9 +495,9 @@
+
+ static void vcpu_pause(struct kvm_vcpu *vcpu)
+ {
+- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
++ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+- wait_event_interruptible(*wq, !vcpu->arch.pause);
++ swait_event_interruptible(*wq, !vcpu->arch.pause);
+ }
+
+ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+diff -Nur linux-3.14.43.orig/arch/arm/kvm/psci.c linux-3.14.43/arch/arm/kvm/psci.c
+--- linux-3.14.43.orig/arch/arm/kvm/psci.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/kvm/psci.c 2015-05-31 15:35:42.761633771 -0500
+@@ -36,7 +36,7 @@
+ {
+ struct kvm *kvm = source_vcpu->kvm;
+ struct kvm_vcpu *vcpu = NULL, *tmp;
+- wait_queue_head_t *wq;
++ struct swait_head *wq;
+ unsigned long cpu_id;
+ unsigned long mpidr;
+ phys_addr_t target_pc;
+@@ -80,7 +80,7 @@
+ smp_mb(); /* Make sure the above is visible */
+
+ wq = kvm_arch_vcpu_wq(vcpu);
+- wake_up_interruptible(wq);
++ swait_wake_interruptible(wq);
+
+ return KVM_PSCI_RET_SUCCESS;
+ }
+diff -Nur linux-3.14.43.orig/arch/arm/mach-at91/at91rm9200_time.c linux-3.14.43/arch/arm/mach-at91/at91rm9200_time.c
+--- linux-3.14.43.orig/arch/arm/mach-at91/at91rm9200_time.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mach-at91/at91rm9200_time.c 2015-05-31 15:35:42.761633771 -0500
+@@ -134,6 +134,7 @@
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
++ remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
+ case CLOCK_EVT_MODE_RESUME:
+ irqmask = 0;
+ break;
+diff -Nur linux-3.14.43.orig/arch/arm/mach-at91/at91sam926x_time.c linux-3.14.43/arch/arm/mach-at91/at91sam926x_time.c
+--- linux-3.14.43.orig/arch/arm/mach-at91/at91sam926x_time.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mach-at91/at91sam926x_time.c 2015-05-31 15:35:42.765633771 -0500
+@@ -78,7 +78,7 @@
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+
+-
++static struct irqaction at91sam926x_pit_irq;
+ /*
+ * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
+ */
+@@ -87,6 +87,8 @@
+ {
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
++ /* Set up irq handler */
++ setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
+ /* update clocksource counter */
+ pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR));
+ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
+@@ -99,6 +101,7 @@
+ case CLOCK_EVT_MODE_UNUSED:
+ /* disable irq, leaving the clocksource active */
+ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
++ remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+diff -Nur linux-3.14.43.orig/arch/arm/mach-exynos/platsmp.c linux-3.14.43/arch/arm/mach-exynos/platsmp.c
+--- linux-3.14.43.orig/arch/arm/mach-exynos/platsmp.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mach-exynos/platsmp.c 2015-05-31 15:35:42.765633771 -0500
+@@ -71,7 +71,7 @@
+ return (void __iomem *)(S5P_VA_SCU);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void exynos_secondary_init(unsigned int cpu)
+ {
+@@ -84,8 +84,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -97,7 +97,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -126,7 +126,7 @@
+
+ if (timeout == 0) {
+ printk(KERN_ERR "cpu1 power enable failed");
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return -ETIMEDOUT;
+ }
+ }
+@@ -165,7 +165,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.14.43.orig/arch/arm/mach-msm/platsmp.c linux-3.14.43/arch/arm/mach-msm/platsmp.c
+--- linux-3.14.43.orig/arch/arm/mach-msm/platsmp.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mach-msm/platsmp.c 2015-05-31 15:35:42.765633771 -0500
+@@ -30,7 +30,7 @@
+
+ extern void msm_secondary_startup(void);
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static inline int get_core_count(void)
+ {
+@@ -50,8 +50,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static void prepare_cold_cpu(unsigned int cpu)
+@@ -88,7 +88,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -121,7 +121,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.14.43.orig/arch/arm/mach-omap2/omap-smp.c linux-3.14.43/arch/arm/mach-omap2/omap-smp.c
+--- linux-3.14.43.orig/arch/arm/mach-omap2/omap-smp.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mach-omap2/omap-smp.c 2015-05-31 15:35:42.765633771 -0500
+@@ -42,7 +42,7 @@
+ /* SCU base address */
+ static void __iomem *scu_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __iomem *omap4_get_scu_base(void)
+ {
+@@ -73,8 +73,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -88,7 +88,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -165,7 +165,7 @@
+ * Now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return 0;
+ }
+diff -Nur linux-3.14.43.orig/arch/arm/mach-prima2/platsmp.c linux-3.14.43/arch/arm/mach-prima2/platsmp.c
+--- linux-3.14.43.orig/arch/arm/mach-prima2/platsmp.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mach-prima2/platsmp.c 2015-05-31 15:35:42.765633771 -0500
+@@ -23,7 +23,7 @@
+ static void __iomem *scu_base;
+ static void __iomem *rsc_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static struct map_desc scu_io_desc __initdata = {
+ .length = SZ_4K,
+@@ -56,8 +56,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static struct of_device_id rsc_ids[] = {
+@@ -95,7 +95,7 @@
+ /* make sure write buffer is drained */
+ mb();
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -127,7 +127,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.14.43.orig/arch/arm/mach-spear/platsmp.c linux-3.14.43/arch/arm/mach-spear/platsmp.c
+--- linux-3.14.43.orig/arch/arm/mach-spear/platsmp.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mach-spear/platsmp.c 2015-05-31 15:35:42.765633771 -0500
+@@ -20,7 +20,7 @@
+ #include <mach/spear.h>
+ #include "generic.h"
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+
+@@ -36,8 +36,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -48,7 +48,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -75,7 +75,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.14.43.orig/arch/arm/mach-sti/platsmp.c linux-3.14.43/arch/arm/mach-sti/platsmp.c
+--- linux-3.14.43.orig/arch/arm/mach-sti/platsmp.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mach-sti/platsmp.c 2015-05-31 15:35:42.765633771 -0500
+@@ -34,7 +34,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void sti_secondary_init(unsigned int cpu)
+ {
+@@ -49,8 +49,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -61,7 +61,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -92,7 +92,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.14.43.orig/arch/arm/mach-ux500/platsmp.c linux-3.14.43/arch/arm/mach-ux500/platsmp.c
+--- linux-3.14.43.orig/arch/arm/mach-ux500/platsmp.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mach-ux500/platsmp.c 2015-05-31 15:35:42.765633771 -0500
+@@ -51,7 +51,7 @@
+ return NULL;
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void ux500_secondary_init(unsigned int cpu)
+ {
+@@ -64,8 +64,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -76,7 +76,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -97,7 +97,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.14.43.orig/arch/arm/mm/fault.c linux-3.14.43/arch/arm/mm/fault.c
+--- linux-3.14.43.orig/arch/arm/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mm/fault.c 2015-05-31 15:35:42.765633771 -0500
+@@ -277,7 +277,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ if (user_mode(regs))
+@@ -431,6 +431,9 @@
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, fsr, regs);
+
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ if (user_mode(regs))
+ goto bad_area;
+
+@@ -498,6 +501,9 @@
+ static int
+ do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ do_bad_area(addr, fsr, regs);
+ return 0;
+ }
+diff -Nur linux-3.14.43.orig/arch/arm/mm/highmem.c linux-3.14.43/arch/arm/mm/highmem.c
+--- linux-3.14.43.orig/arch/arm/mm/highmem.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/mm/highmem.c 2015-05-31 15:35:42.769633771 -0500
+@@ -38,6 +38,7 @@
+
+ void *kmap_atomic(struct page *page)
+ {
++ pte_t pte = mk_pte(page, kmap_prot);
+ unsigned int idx;
+ unsigned long vaddr;
+ void *kmap;
+@@ -76,7 +77,10 @@
+ * in place, so the contained TLB flush ensures the TLB is updated
+ * with the new mapping.
+ */
+- set_top_pte(vaddr, mk_pte(page, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_top_pte(vaddr, pte);
+
+ return (void *)vaddr;
+ }
+@@ -93,12 +97,15 @@
+
+ if (cache_is_vivt())
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+- set_top_pte(vaddr, __pte(0));
+ #else
+ (void) idx; /* to kill a warning */
+ #endif
++ set_top_pte(vaddr, __pte(0));
+ kmap_atomic_idx_pop();
+ } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+ /* this address was obtained through kmap_high_get() */
+@@ -110,6 +117,7 @@
+
+ void *kmap_atomic_pfn(unsigned long pfn)
+ {
++ pte_t pte = pfn_pte(pfn, kmap_prot);
+ unsigned long vaddr;
+ int idx, type;
+
+@@ -121,7 +129,10 @@
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(get_top_pte(vaddr)));
+ #endif
+- set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_top_pte(vaddr, pte);
+
+ return (void *)vaddr;
+ }
+@@ -135,3 +146,29 @@
+
+ return pte_page(get_top_pte(vaddr));
+ }
++
++#if defined CONFIG_PREEMPT_RT_FULL
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ int i;
++
++ /*
++ * Clear @prev's kmap_atomic mappings
++ */
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0));
++ }
++ /*
++ * Restore @next_p's kmap_atomic mappings
++ */
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ if (!pte_none(next_p->kmap_pte[i]))
++ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx),
++ next_p->kmap_pte[i]);
++ }
++}
++#endif
+diff -Nur linux-3.14.43.orig/arch/arm/plat-versatile/platsmp.c linux-3.14.43/arch/arm/plat-versatile/platsmp.c
+--- linux-3.14.43.orig/arch/arm/plat-versatile/platsmp.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/arm/plat-versatile/platsmp.c 2015-05-31 15:35:42.769633771 -0500
+@@ -30,7 +30,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void versatile_secondary_init(unsigned int cpu)
+ {
+@@ -43,8 +43,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -55,7 +55,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+@@ -85,7 +85,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.14.43.orig/arch/avr32/mm/fault.c linux-3.14.43/arch/avr32/mm/fault.c
+--- linux-3.14.43.orig/arch/avr32/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/avr32/mm/fault.c 2015-05-31 15:35:42.769633771 -0500
+@@ -81,7 +81,7 @@
+ * If we're in an interrupt or have no user context, we must
+ * not take the fault...
+ */
+- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
++ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
+ goto no_context;
+
+ local_irq_enable();
+diff -Nur linux-3.14.43.orig/arch/cris/mm/fault.c linux-3.14.43/arch/cris/mm/fault.c
+--- linux-3.14.43.orig/arch/cris/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/cris/mm/fault.c 2015-05-31 15:35:42.769633771 -0500
+@@ -113,7 +113,7 @@
+ * user context, we must not take the fault.
+ */
+
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ if (user_mode(regs))
+diff -Nur linux-3.14.43.orig/arch/frv/mm/fault.c linux-3.14.43/arch/frv/mm/fault.c
+--- linux-3.14.43.orig/arch/frv/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/frv/mm/fault.c 2015-05-31 15:35:42.769633771 -0500
+@@ -78,7 +78,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ if (user_mode(__frame))
+diff -Nur linux-3.14.43.orig/arch/ia64/mm/fault.c linux-3.14.43/arch/ia64/mm/fault.c
+--- linux-3.14.43.orig/arch/ia64/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/ia64/mm/fault.c 2015-05-31 15:35:42.769633771 -0500
+@@ -96,7 +96,7 @@
+ /*
+ * If we're in an interrupt or have no user context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ #ifdef CONFIG_VIRTUAL_MEM_MAP
+diff -Nur linux-3.14.43.orig/arch/Kconfig linux-3.14.43/arch/Kconfig
+--- linux-3.14.43.orig/arch/Kconfig 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/Kconfig 2015-05-31 15:35:42.433633774 -0500
+@@ -6,6 +6,7 @@
+ tristate "OProfile system profiling"
+ depends on PROFILING
+ depends on HAVE_OPROFILE
++ depends on !PREEMPT_RT_FULL
+ select RING_BUFFER
+ select RING_BUFFER_ALLOW_SWAP
+ help
+diff -Nur linux-3.14.43.orig/arch/m32r/mm/fault.c linux-3.14.43/arch/m32r/mm/fault.c
+--- linux-3.14.43.orig/arch/m32r/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/m32r/mm/fault.c 2015-05-31 15:35:42.773633771 -0500
+@@ -114,7 +114,7 @@
+ * If we're in an interrupt or have no user context or are running in an
+ * atomic region then we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
+
+ if (error_code & ACE_USERMODE)
+diff -Nur linux-3.14.43.orig/arch/m68k/mm/fault.c linux-3.14.43/arch/m68k/mm/fault.c
+--- linux-3.14.43.orig/arch/m68k/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/m68k/mm/fault.c 2015-05-31 15:35:42.773633771 -0500
+@@ -81,7 +81,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ if (user_mode(regs))
+diff -Nur linux-3.14.43.orig/arch/microblaze/mm/fault.c linux-3.14.43/arch/microblaze/mm/fault.c
+--- linux-3.14.43.orig/arch/microblaze/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/microblaze/mm/fault.c 2015-05-31 15:35:42.773633771 -0500
+@@ -107,7 +107,7 @@
+ if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
+ is_write = 0;
+
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ if (kernel_mode(regs))
+ goto bad_area_nosemaphore;
+
+diff -Nur linux-3.14.43.orig/arch/mips/Kconfig linux-3.14.43/arch/mips/Kconfig
+--- linux-3.14.43.orig/arch/mips/Kconfig 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/mips/Kconfig 2015-05-31 15:35:42.773633771 -0500
+@@ -2094,7 +2094,7 @@
+ #
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL
+
+ config CPU_SUPPORTS_HIGHMEM
+ bool
+diff -Nur linux-3.14.43.orig/arch/mips/kernel/signal.c linux-3.14.43/arch/mips/kernel/signal.c
+--- linux-3.14.43.orig/arch/mips/kernel/signal.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/mips/kernel/signal.c 2015-05-31 15:35:42.773633771 -0500
+@@ -575,6 +575,7 @@
+ __u32 thread_info_flags)
+ {
+ local_irq_enable();
++ preempt_check_resched();
+
+ user_exit();
+
+diff -Nur linux-3.14.43.orig/arch/mips/mm/fault.c linux-3.14.43/arch/mips/mm/fault.c
+--- linux-3.14.43.orig/arch/mips/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/mips/mm/fault.c 2015-05-31 15:35:42.809633770 -0500
+@@ -89,7 +89,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
+
+ if (user_mode(regs))
+diff -Nur linux-3.14.43.orig/arch/mips/mm/init.c linux-3.14.43/arch/mips/mm/init.c
+--- linux-3.14.43.orig/arch/mips/mm/init.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/mips/mm/init.c 2015-05-31 15:35:42.809633770 -0500
+@@ -124,7 +124,7 @@
+
+ BUG_ON(Page_dcache_dirty(page));
+
+- pagefault_disable();
++ raw_pagefault_disable();
+ idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+ #ifdef CONFIG_MIPS_MT_SMTC
+ idx += FIX_N_COLOURS * smp_processor_id() +
+@@ -191,7 +191,7 @@
+ write_c0_entryhi(old_ctx);
+ EXIT_CRITICAL(flags);
+ #endif
+- pagefault_enable();
++ raw_pagefault_enable();
+ }
+
+ void copy_user_highpage(struct page *to, struct page *from,
+diff -Nur linux-3.14.43.orig/arch/mn10300/mm/fault.c linux-3.14.43/arch/mn10300/mm/fault.c
+--- linux-3.14.43.orig/arch/mn10300/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/mn10300/mm/fault.c 2015-05-31 15:35:42.809633770 -0500
+@@ -168,7 +168,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+diff -Nur linux-3.14.43.orig/arch/parisc/mm/fault.c linux-3.14.43/arch/parisc/mm/fault.c
+--- linux-3.14.43.orig/arch/parisc/mm/fault.c 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/parisc/mm/fault.c 2015-05-31 15:35:42.809633770 -0500
+@@ -207,7 +207,7 @@
+ int fault;
+ unsigned int flags;
+
+- if (in_atomic())
++ if (pagefault_disabled())
+ goto no_context;
+
+ tsk = current;
+diff -Nur linux-3.14.43.orig/arch/powerpc/include/asm/kvm_host.h linux-3.14.43/arch/powerpc/include/asm/kvm_host.h
+--- linux-3.14.43.orig/arch/powerpc/include/asm/kvm_host.h 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/powerpc/include/asm/kvm_host.h 2015-05-31 15:35:42.813633770 -0500
+@@ -295,7 +295,7 @@
+ u8 in_guest;
+ struct list_head runnable_threads;
+ spinlock_t lock;
+- wait_queue_head_t wq;
++ struct swait_head wq;
+ u64 stolen_tb;
+ u64 preempt_tb;
+ struct kvm_vcpu *runner;
+@@ -612,7 +612,7 @@
+ u8 prodded;
+ u32 last_inst;
+
+- wait_queue_head_t *wqp;
++ struct swait_head *wqp;
+ struct kvmppc_vcore *vcore;
+ int ret;
+ int trap;
+diff -Nur linux-3.14.43.orig/arch/powerpc/include/asm/thread_info.h linux-3.14.43/arch/powerpc/include/asm/thread_info.h
+--- linux-3.14.43.orig/arch/powerpc/include/asm/thread_info.h 2015-05-17 11:54:01.000000000 -0500
++++ linux-3.14.43/arch/powerpc/include/asm/thread_info.h 2015-05-31 15:35:43.049633768 -0500
+@@ -43,6 +43,8 @@
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => preemptable,
++ <0 => BUG */
+ struct restart_block restart_block;
+ unsigned long local_flags; /* private flags for thread */
+
+@@ -88,8 +90,7 @@
+ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+ #define TIF_SIGPENDING 1 /* signal pending */
+ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+- TIF_NEED_RESCHED */
++#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
+ #define TIF_32BIT 4 /* 32 bit binary */
+ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
+ #define TIF_SYSCALL_AUDIT 7 /*