author    Waldemar Brodkorb <wbx@uclibc-ng.org>  2016-06-19 16:54:33 +0200
committer Waldemar Brodkorb <wbx@uclibc-ng.org>  2016-06-19 16:54:51 +0200
commit    858ab548876e3924a2f9563f9f7140d6357302df (patch)
tree      f3f44dc43dfe6a7d79b4cc29e61133773495fcb7 /target/linux
parent    73ad3670c8f882bf99d271f23ae3c1be4325462e (diff)
linux: update to 4.4.13, add realtime patch
Diffstat (limited to 'target/linux')
-rw-r--r--  target/linux/config/Config.in.kernel                                                                                6
-rw-r--r--  target/linux/patches/4.4.13/coldfire-sighandler.patch (renamed from target/linux/patches/4.4.10/coldfire-sighandler.patch)      0
-rw-r--r--  target/linux/patches/4.4.13/initramfs-nosizelimit.patch (renamed from target/linux/patches/4.4.10/initramfs-nosizelimit.patch)  0
-rw-r--r--  target/linux/patches/4.4.13/ld-or1k.patch (renamed from target/linux/patches/4.4.10/ld-or1k.patch)                              0
-rw-r--r--  target/linux/patches/4.4.13/macsonic.patch (renamed from target/linux/patches/4.4.10/macsonic.patch)                            0
-rw-r--r--  target/linux/patches/4.4.13/mips-xz.patch (renamed from target/linux/patches/4.4.10/mips-xz.patch)                              0
-rw-r--r--  target/linux/patches/4.4.13/realtime.patch                                                                                  30654
-rw-r--r--  target/linux/patches/4.4.13/startup.patch (renamed from target/linux/patches/4.4.10/startup.patch)                              0
-rw-r--r--  target/linux/patches/4.4.13/use-libgcc-for-sh.patch (renamed from target/linux/patches/4.4.10/use-libgcc-for-sh.patch)          0
9 files changed, 30658 insertions, 2 deletions
diff --git a/target/linux/config/Config.in.kernel b/target/linux/config/Config.in.kernel
index 9478e2860..c33fc2473 100644
--- a/target/linux/config/Config.in.kernel
+++ b/target/linux/config/Config.in.kernel
@@ -134,14 +134,16 @@ config ADK_KERNEL_PREEMPT__LL
config ADK_KERNEL_PREEMPT_RTB
bool "Preemptible Kernel (Basic RealTime)"
select ADK_KERNEL_HIGH_RES_TIMERS
- depends on ADK_TARGET_KERNEL_VERSION_4_1
+ depends on ADK_TARGET_KERNEL_VERSION_4_1 \
+ || ADK_TARGET_KERNEL_VERSION_4_4
help
Preemptible Kernel (Basic RT)
config ADK_KERNEL_PREEMPT_RT_FULL
bool "Fully Preemptible Kernel (RealTime)"
select ADK_KERNEL_HIGH_RES_TIMERS
- depends on ADK_TARGET_KERNEL_VERSION_4_1
+ depends on ADK_TARGET_KERNEL_VERSION_4_1 \
+ || ADK_TARGET_KERNEL_VERSION_4_4
help
Fully Preemptible Kernel (RealTime)
https://www.kernel.org/pub/linux/kernel/projects/rt/
diff --git a/target/linux/patches/4.4.10/coldfire-sighandler.patch b/target/linux/patches/4.4.13/coldfire-sighandler.patch
index c52a4e228..c52a4e228 100644
--- a/target/linux/patches/4.4.10/coldfire-sighandler.patch
+++ b/target/linux/patches/4.4.13/coldfire-sighandler.patch
diff --git a/target/linux/patches/4.4.10/initramfs-nosizelimit.patch b/target/linux/patches/4.4.13/initramfs-nosizelimit.patch
index 40d2f6bd8..40d2f6bd8 100644
--- a/target/linux/patches/4.4.10/initramfs-nosizelimit.patch
+++ b/target/linux/patches/4.4.13/initramfs-nosizelimit.patch
diff --git a/target/linux/patches/4.4.10/ld-or1k.patch b/target/linux/patches/4.4.13/ld-or1k.patch
index 264f9166f..264f9166f 100644
--- a/target/linux/patches/4.4.10/ld-or1k.patch
+++ b/target/linux/patches/4.4.13/ld-or1k.patch
diff --git a/target/linux/patches/4.4.10/macsonic.patch b/target/linux/patches/4.4.13/macsonic.patch
index 75a6fcad2..75a6fcad2 100644
--- a/target/linux/patches/4.4.10/macsonic.patch
+++ b/target/linux/patches/4.4.13/macsonic.patch
diff --git a/target/linux/patches/4.4.10/mips-xz.patch b/target/linux/patches/4.4.13/mips-xz.patch
index 5cfac6254..5cfac6254 100644
--- a/target/linux/patches/4.4.10/mips-xz.patch
+++ b/target/linux/patches/4.4.13/mips-xz.patch
diff --git a/target/linux/patches/4.4.13/realtime.patch b/target/linux/patches/4.4.13/realtime.patch
new file mode 100644
index 000000000..f639a07b1
--- /dev/null
+++ b/target/linux/patches/4.4.13/realtime.patch
@@ -0,0 +1,30654 @@
+diff -Nur linux-4.4.13.orig/arch/arm/include/asm/switch_to.h linux-4.4.13/arch/arm/include/asm/switch_to.h
+--- linux-4.4.13.orig/arch/arm/include/asm/switch_to.h 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/include/asm/switch_to.h 2016-06-19 16:02:36.688488688 +0200
+@@ -3,6 +3,13 @@
+
+ #include <linux/thread_info.h>
+
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+ /*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+@@ -25,6 +32,7 @@
+ #define switch_to(prev,next,last) \
+ do { \
+ __complete_pending_tlbi(); \
++ switch_kmaps(prev, next); \
+ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
+ } while (0)
+
+diff -Nur linux-4.4.13.orig/arch/arm/include/asm/thread_info.h linux-4.4.13/arch/arm/include/asm/thread_info.h
+--- linux-4.4.13.orig/arch/arm/include/asm/thread_info.h 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/include/asm/thread_info.h 2016-06-19 16:02:36.688488688 +0200
+@@ -49,6 +49,7 @@
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ __u32 cpu; /* cpu */
+@@ -142,7 +143,8 @@
+ #define TIF_SYSCALL_TRACE 4 /* syscall trace active */
+ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
++#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
++#define TIF_NEED_RESCHED_LAZY 7
+
+ #define TIF_NOHZ 12 /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT 17
+@@ -152,6 +154,7 @@
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+@@ -167,7 +170,8 @@
+ * Change these and you break ASM code in entry-common.S
+ */
+ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
++ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
++ _TIF_NEED_RESCHED_LAZY)
+
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_ARM_THREAD_INFO_H */
+diff -Nur linux-4.4.13.orig/arch/arm/Kconfig linux-4.4.13/arch/arm/Kconfig
+--- linux-4.4.13.orig/arch/arm/Kconfig 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/Kconfig 2016-06-19 16:02:36.688488688 +0200
+@@ -33,7 +33,7 @@
+ select HARDIRQS_SW_RESEND
+ select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+ select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !PREEMPT_RT_BASE
+ select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
+ select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+ select HAVE_ARCH_TRACEHOOK
+@@ -68,6 +68,7 @@
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+diff -Nur linux-4.4.13.orig/arch/arm/kernel/asm-offsets.c linux-4.4.13/arch/arm/kernel/asm-offsets.c
+--- linux-4.4.13.orig/arch/arm/kernel/asm-offsets.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/kernel/asm-offsets.c 2016-06-19 16:02:36.688488688 +0200
+@@ -65,6 +65,7 @@
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+diff -Nur linux-4.4.13.orig/arch/arm/kernel/entry-armv.S linux-4.4.13/arch/arm/kernel/entry-armv.S
+--- linux-4.4.13.orig/arch/arm/kernel/entry-armv.S 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/kernel/entry-armv.S 2016-06-19 16:02:36.688488688 +0200
+@@ -215,11 +215,18 @@
+ #ifdef CONFIG_PREEMPT
+ get_thread_info tsk
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+- ldr r0, [tsk, #TI_FLAGS] @ get flags
+ teq r8, #0 @ if preempt count != 0
++ bne 1f @ return from exeption
++ ldr r0, [tsk, #TI_FLAGS] @ get flags
++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
++ blne svc_preempt @ preempt!
++
++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r8, #0 @ if preempt lazy count != 0
+ movne r0, #0 @ force flags to 0
+- tst r0, #_TIF_NEED_RESCHED
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ blne svc_preempt
++1:
+ #endif
+
+ svc_exit r5, irq = 1 @ return from exception
+@@ -234,8 +241,14 @@
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
++ bne 1b
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ reteq r8 @ go again
+- b 1b
++ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r0, #0 @ if preempt lazy count != 0
++ beq 1b
++ ret r8 @ go again
++
+ #endif
+
+ __und_fault:
+diff -Nur linux-4.4.13.orig/arch/arm/kernel/entry-common.S linux-4.4.13/arch/arm/kernel/entry-common.S
+--- linux-4.4.13.orig/arch/arm/kernel/entry-common.S 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/kernel/entry-common.S 2016-06-19 16:02:36.688488688 +0200
+@@ -36,7 +36,9 @@
+ UNWIND(.cantunwind )
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++ bne fast_work_pending
++ tst r1, #_TIF_SECCOMP
+ bne fast_work_pending
+
+ /* perform architecture specific actions before user return */
+@@ -62,8 +64,11 @@
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++ bne do_slower_path
++ tst r1, #_TIF_SECCOMP
+ beq no_work_pending
++do_slower_path:
+ UNWIND(.fnend )
+ ENDPROC(ret_fast_syscall)
+
+diff -Nur linux-4.4.13.orig/arch/arm/kernel/process.c linux-4.4.13/arch/arm/kernel/process.c
+--- linux-4.4.13.orig/arch/arm/kernel/process.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/kernel/process.c 2016-06-19 16:02:36.688488688 +0200
+@@ -319,6 +319,30 @@
+ }
+
+ #ifdef CONFIG_MMU
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++ struct page *page;
++ unsigned long addr = 0xffff0000;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ page = pmd_page(*(pmd));
++
++ pgtable_page_ctor(page);
++
++ return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
++
+ #ifdef CONFIG_KUSER_HELPERS
+ /*
+ * The vectors page is always readable from user space for the
+diff -Nur linux-4.4.13.orig/arch/arm/kernel/signal.c linux-4.4.13/arch/arm/kernel/signal.c
+--- linux-4.4.13.orig/arch/arm/kernel/signal.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/kernel/signal.c 2016-06-19 16:02:36.688488688 +0200
+@@ -572,7 +572,8 @@
+ */
+ trace_hardirqs_off();
+ do {
+- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ if (likely(thread_flags & (_TIF_NEED_RESCHED |
++ _TIF_NEED_RESCHED_LAZY))) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+diff -Nur linux-4.4.13.orig/arch/arm/kernel/smp.c linux-4.4.13/arch/arm/kernel/smp.c
+--- linux-4.4.13.orig/arch/arm/kernel/smp.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/kernel/smp.c 2016-06-19 16:02:36.688488688 +0200
+@@ -230,8 +230,6 @@
+ flush_cache_louis();
+ local_flush_tlb_all();
+
+- clear_tasks_mm_cpumask(cpu);
+-
+ return 0;
+ }
+
+@@ -247,6 +245,9 @@
+ pr_err("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
++
++ clear_tasks_mm_cpumask(cpu);
++
+ pr_notice("CPU%u: shutdown\n", cpu);
+
+ /*
+diff -Nur linux-4.4.13.orig/arch/arm/kernel/unwind.c linux-4.4.13/arch/arm/kernel/unwind.c
+--- linux-4.4.13.orig/arch/arm/kernel/unwind.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/kernel/unwind.c 2016-06-19 16:02:36.688488688 +0200
+@@ -93,7 +93,7 @@
+ static const struct unwind_idx *__origin_unwind_idx;
+ extern const struct unwind_idx __stop_unwind_idx[];
+
+-static DEFINE_SPINLOCK(unwind_lock);
++static DEFINE_RAW_SPINLOCK(unwind_lock);
+ static LIST_HEAD(unwind_tables);
+
+ /* Convert a prel31 symbol to an absolute address */
+@@ -201,7 +201,7 @@
+ /* module unwind tables */
+ struct unwind_table *table;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_for_each_entry(table, &unwind_tables, list) {
+ if (addr >= table->begin_addr &&
+ addr < table->end_addr) {
+@@ -213,7 +213,7 @@
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ }
+
+ pr_debug("%s: idx = %p\n", __func__, idx);
+@@ -529,9 +529,9 @@
+ tab->begin_addr = text_addr;
+ tab->end_addr = text_addr + text_size;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_add_tail(&tab->list, &unwind_tables);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ return tab;
+ }
+@@ -543,9 +543,9 @@
+ if (!tab)
+ return;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_del(&tab->list);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ kfree(tab);
+ }
+diff -Nur linux-4.4.13.orig/arch/arm/kvm/arm.c linux-4.4.13/arch/arm/kvm/arm.c
+--- linux-4.4.13.orig/arch/arm/kvm/arm.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/kvm/arm.c 2016-06-19 16:02:36.692488841 +0200
+@@ -498,18 +498,18 @@
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
++ struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+ vcpu->arch.pause = false;
+- wake_up_interruptible(wq);
++ swake_up(wq);
+ }
+ }
+
+ static void vcpu_sleep(struct kvm_vcpu *vcpu)
+ {
+- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
++ struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+- wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
++ swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+ (!vcpu->arch.pause)));
+ }
+
+@@ -568,7 +568,7 @@
+ * involves poking the GIC, which must be done in a
+ * non-preemptible context.
+ */
+- preempt_disable();
++ migrate_disable();
+ kvm_timer_flush_hwstate(vcpu);
+ kvm_vgic_flush_hwstate(vcpu);
+
+@@ -587,7 +587,7 @@
+ local_irq_enable();
+ kvm_timer_sync_hwstate(vcpu);
+ kvm_vgic_sync_hwstate(vcpu);
+- preempt_enable();
++ migrate_enable();
+ continue;
+ }
+
+@@ -641,7 +641,7 @@
+
+ kvm_vgic_sync_hwstate(vcpu);
+
+- preempt_enable();
++ migrate_enable();
+
+ ret = handle_exit(vcpu, run, ret);
+ }
+diff -Nur linux-4.4.13.orig/arch/arm/kvm/psci.c linux-4.4.13/arch/arm/kvm/psci.c
+--- linux-4.4.13.orig/arch/arm/kvm/psci.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/kvm/psci.c 2016-06-19 16:02:36.692488841 +0200
+@@ -70,7 +70,7 @@
+ {
+ struct kvm *kvm = source_vcpu->kvm;
+ struct kvm_vcpu *vcpu = NULL;
+- wait_queue_head_t *wq;
++ struct swait_queue_head *wq;
+ unsigned long cpu_id;
+ unsigned long context_id;
+ phys_addr_t target_pc;
+@@ -119,7 +119,7 @@
+ smp_mb(); /* Make sure the above is visible */
+
+ wq = kvm_arch_vcpu_wq(vcpu);
+- wake_up_interruptible(wq);
++ swake_up(wq);
+
+ return PSCI_RET_SUCCESS;
+ }
+diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/at91rm9200.c linux-4.4.13/arch/arm/mach-at91/at91rm9200.c
+--- linux-4.4.13.orig/arch/arm/mach-at91/at91rm9200.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-at91/at91rm9200.c 2016-06-19 16:02:36.692488841 +0200
+@@ -12,7 +12,6 @@
+ #include <linux/of_platform.h>
+
+ #include <asm/mach/arch.h>
+-#include <asm/system_misc.h>
+
+ #include "generic.h"
+ #include "soc.h"
+@@ -33,7 +32,6 @@
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+
+- arm_pm_idle = at91rm9200_idle;
+ at91rm9200_pm_init();
+ }
+
+diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/at91sam9.c linux-4.4.13/arch/arm/mach-at91/at91sam9.c
+--- linux-4.4.13.orig/arch/arm/mach-at91/at91sam9.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-at91/at91sam9.c 2016-06-19 16:02:36.692488841 +0200
+@@ -62,8 +62,6 @@
+ soc_dev = soc_device_to_device(soc);
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+-
+- arm_pm_idle = at91sam9_idle;
+ }
+
+ static void __init at91sam9_dt_device_init(void)
+diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/generic.h linux-4.4.13/arch/arm/mach-at91/generic.h
+--- linux-4.4.13.orig/arch/arm/mach-at91/generic.h 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-at91/generic.h 2016-06-19 16:02:36.692488841 +0200
+@@ -11,27 +11,18 @@
+ #ifndef _AT91_GENERIC_H
+ #define _AT91_GENERIC_H
+
+-#include <linux/of.h>
+-#include <linux/reboot.h>
+-
+- /* Map io */
+-extern void __init at91_map_io(void);
+-extern void __init at91_alt_map_io(void);
+-
+-/* idle */
+-extern void at91rm9200_idle(void);
+-extern void at91sam9_idle(void);
+-
+ #ifdef CONFIG_PM
+ extern void __init at91rm9200_pm_init(void);
+ extern void __init at91sam9260_pm_init(void);
+ extern void __init at91sam9g45_pm_init(void);
+ extern void __init at91sam9x5_pm_init(void);
++extern void __init sama5_pm_init(void);
+ #else
+ static inline void __init at91rm9200_pm_init(void) { }
+ static inline void __init at91sam9260_pm_init(void) { }
+ static inline void __init at91sam9g45_pm_init(void) { }
+ static inline void __init at91sam9x5_pm_init(void) { }
++static inline void __init sama5_pm_init(void) { }
+ #endif
+
+ #endif /* _AT91_GENERIC_H */
+diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/Kconfig linux-4.4.13/arch/arm/mach-at91/Kconfig
+--- linux-4.4.13.orig/arch/arm/mach-at91/Kconfig 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-at91/Kconfig 2016-06-19 16:02:36.692488841 +0200
+@@ -99,6 +99,7 @@
+ config COMMON_CLK_AT91
+ bool
+ select COMMON_CLK
++ select MFD_SYSCON
+
+ config HAVE_AT91_SMD
+ bool
+diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/pm.c linux-4.4.13/arch/arm/mach-at91/pm.c
+--- linux-4.4.13.orig/arch/arm/mach-at91/pm.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-at91/pm.c 2016-06-19 16:02:36.692488841 +0200
+@@ -31,10 +31,13 @@
+ #include <asm/mach/irq.h>
+ #include <asm/fncpy.h>
+ #include <asm/cacheflush.h>
++#include <asm/system_misc.h>
+
+ #include "generic.h"
+ #include "pm.h"
+
++static void __iomem *pmc;
++
+ /*
+ * FIXME: this is needed to communicate between the pinctrl driver and
+ * the PM implementation in the machine. Possibly part of the PM
+@@ -87,7 +90,7 @@
+ unsigned long scsr;
+ int i;
+
+- scsr = at91_pmc_read(AT91_PMC_SCSR);
++ scsr = readl(pmc + AT91_PMC_SCSR);
+
+ /* USB must not be using PLLB */
+ if ((scsr & at91_pm_data.uhp_udp_mask) != 0) {
+@@ -101,8 +104,7 @@
+
+ if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
+ continue;
+-
+- css = at91_pmc_read(AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
++ css = readl(pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
+ if (css != AT91_PMC_CSS_SLOW) {
+ pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
+ return 0;
+@@ -145,8 +147,8 @@
+ flush_cache_all();
+ outer_disable();
+
+- at91_suspend_sram_fn(at91_pmc_base, at91_ramc_base[0],
+- at91_ramc_base[1], pm_data);
++ at91_suspend_sram_fn(pmc, at91_ramc_base[0],
++ at91_ramc_base[1], pm_data);
+
+ outer_resume();
+ }
+@@ -353,6 +355,21 @@
+ at91_pm_set_standby(standby);
+ }
+
++void at91rm9200_idle(void)
++{
++ /*
++ * Disable the processor clock. The processor will be automatically
++ * re-enabled by an interrupt or by a reset.
++ */
++ writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
++}
++
++void at91sam9_idle(void)
++{
++ writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
++ cpu_do_idle();
++}
++
+ static void __init at91_pm_sram_init(void)
+ {
+ struct gen_pool *sram_pool;
+@@ -399,13 +416,36 @@
+ &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
+ }
+
+-static void __init at91_pm_init(void)
++static const struct of_device_id atmel_pmc_ids[] __initconst = {
++ { .compatible = "atmel,at91rm9200-pmc" },
++ { .compatible = "atmel,at91sam9260-pmc" },
++ { .compatible = "atmel,at91sam9g45-pmc" },
++ { .compatible = "atmel,at91sam9n12-pmc" },
++ { .compatible = "atmel,at91sam9x5-pmc" },
++ { .compatible = "atmel,sama5d3-pmc" },
++ { .compatible = "atmel,sama5d2-pmc" },
++ { /* sentinel */ },
++};
++
++static void __init at91_pm_init(void (*pm_idle)(void))
+ {
+- at91_pm_sram_init();
++ struct device_node *pmc_np;
+
+ if (at91_cpuidle_device.dev.platform_data)
+ platform_device_register(&at91_cpuidle_device);
+
++ pmc_np = of_find_matching_node(NULL, atmel_pmc_ids);
++ pmc = of_iomap(pmc_np, 0);
++ if (!pmc) {
++ pr_err("AT91: PM not supported, PMC not found\n");
++ return;
++ }
++
++ if (pm_idle)
++ arm_pm_idle = pm_idle;
++
++ at91_pm_sram_init();
++
+ if (at91_suspend_sram_fn)
+ suspend_set_ops(&at91_pm_ops);
+ else
+@@ -424,7 +464,7 @@
+ at91_pm_data.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP;
+ at91_pm_data.memctrl = AT91_MEMCTRL_MC;
+
+- at91_pm_init();
++ at91_pm_init(at91rm9200_idle);
+ }
+
+ void __init at91sam9260_pm_init(void)
+@@ -432,7 +472,7 @@
+ at91_dt_ramc();
+ at91_pm_data.memctrl = AT91_MEMCTRL_SDRAMC;
+ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
+- return at91_pm_init();
++ at91_pm_init(at91sam9_idle);
+ }
+
+ void __init at91sam9g45_pm_init(void)
+@@ -440,7 +480,7 @@
+ at91_dt_ramc();
+ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP;
+ at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
+- return at91_pm_init();
++ at91_pm_init(at91sam9_idle);
+ }
+
+ void __init at91sam9x5_pm_init(void)
+@@ -448,5 +488,13 @@
+ at91_dt_ramc();
+ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
+ at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
+- return at91_pm_init();
++ at91_pm_init(at91sam9_idle);
++}
++
++void __init sama5_pm_init(void)
++{
++ at91_dt_ramc();
++ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
++ at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
++ at91_pm_init(NULL);
+ }
+diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/sama5.c linux-4.4.13/arch/arm/mach-at91/sama5.c
+--- linux-4.4.13.orig/arch/arm/mach-at91/sama5.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-at91/sama5.c 2016-06-19 16:02:36.692488841 +0200
+@@ -51,7 +51,7 @@
+ soc_dev = soc_device_to_device(soc);
+
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+- at91sam9x5_pm_init();
++ sama5_pm_init();
+ }
+
+ static const char *const sama5_dt_board_compat[] __initconst = {
+diff -Nur linux-4.4.13.orig/arch/arm/mach-exynos/platsmp.c linux-4.4.13/arch/arm/mach-exynos/platsmp.c
+--- linux-4.4.13.orig/arch/arm/mach-exynos/platsmp.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-exynos/platsmp.c 2016-06-19 16:02:36.692488841 +0200
+@@ -230,7 +230,7 @@
+ return (void __iomem *)(S5P_VA_SCU);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void exynos_secondary_init(unsigned int cpu)
+ {
+@@ -243,8 +243,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
+@@ -308,7 +308,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -335,7 +335,7 @@
+
+ if (timeout == 0) {
+ printk(KERN_ERR "cpu1 power enable failed");
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return -ETIMEDOUT;
+ }
+ }
+@@ -381,7 +381,7 @@
+ * calibrations, then wait for it to finish
+ */
+ fail:
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? ret : 0;
+ }
+diff -Nur linux-4.4.13.orig/arch/arm/mach-hisi/platmcpm.c linux-4.4.13/arch/arm/mach-hisi/platmcpm.c
+--- linux-4.4.13.orig/arch/arm/mach-hisi/platmcpm.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-hisi/platmcpm.c 2016-06-19 16:02:36.692488841 +0200
+@@ -61,7 +61,7 @@
+
+ static void __iomem *sysctrl, *fabric;
+ static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ static u32 fabric_phys_addr;
+ /*
+ * [0]: bootwrapper physical address
+@@ -113,7 +113,7 @@
+ if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
+ return -EINVAL;
+
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+
+ if (hip04_cpu_table[cluster][cpu])
+ goto out;
+@@ -147,7 +147,7 @@
+
+ out:
+ hip04_cpu_table[cluster][cpu]++;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+
+ return 0;
+ }
+@@ -162,11 +162,11 @@
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+ hip04_cpu_table[cluster][cpu]--;
+ if (hip04_cpu_table[cluster][cpu] == 1) {
+ /* A power_up request went ahead of us. */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return;
+ } else if (hip04_cpu_table[cluster][cpu] > 1) {
+ pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
+@@ -174,7 +174,7 @@
+ }
+
+ last_man = hip04_cluster_is_down(cluster);
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ if (last_man) {
+ /* Since it's Cortex A15, disable L2 prefetching. */
+ asm volatile(
+@@ -203,7 +203,7 @@
+ cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
+
+ count = TIMEOUT_MSEC / POLL_MSEC;
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ for (tries = 0; tries < count; tries++) {
+ if (hip04_cpu_table[cluster][cpu])
+ goto err;
+@@ -211,10 +211,10 @@
+ data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
+ if (data & CORE_WFI_STATUS(cpu))
+ break;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ /* Wait for clean L2 when the whole cluster is down. */
+ msleep(POLL_MSEC);
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ }
+ if (tries >= count)
+ goto err;
+@@ -231,10 +231,10 @@
+ goto err;
+ if (hip04_cluster_is_down(cluster))
+ hip04_set_snoop_filter(cluster, 0);
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return 1;
+ err:
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return 0;
+ }
+ #endif
+diff -Nur linux-4.4.13.orig/arch/arm/mach-imx/Kconfig linux-4.4.13/arch/arm/mach-imx/Kconfig
+--- linux-4.4.13.orig/arch/arm/mach-imx/Kconfig 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-imx/Kconfig 2016-06-19 16:02:36.692488841 +0200
+@@ -524,7 +524,7 @@
+ bool "i.MX6 Quad/DualLite support"
+ select ARM_ERRATA_764369 if SMP
+ select HAVE_ARM_SCU if SMP
+- select HAVE_ARM_TWD if SMP
++ select HAVE_ARM_TWD
+ select PCI_DOMAINS if PCI
+ select PINCTRL_IMX6Q
+ select SOC_IMX6
+diff -Nur linux-4.4.13.orig/arch/arm/mach-omap2/omap-smp.c linux-4.4.13/arch/arm/mach-omap2/omap-smp.c
+--- linux-4.4.13.orig/arch/arm/mach-omap2/omap-smp.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-omap2/omap-smp.c 2016-06-19 16:02:36.692488841 +0200
+@@ -43,7 +43,7 @@
+ /* SCU base address */
+ static void __iomem *scu_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __iomem *omap4_get_scu_base(void)
+ {
+@@ -74,8 +74,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -89,7 +89,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -166,7 +166,7 @@
+ * Now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return 0;
+ }
+diff -Nur linux-4.4.13.orig/arch/arm/mach-prima2/platsmp.c linux-4.4.13/arch/arm/mach-prima2/platsmp.c
+--- linux-4.4.13.orig/arch/arm/mach-prima2/platsmp.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-prima2/platsmp.c 2016-06-19 16:02:36.692488841 +0200
+@@ -22,7 +22,7 @@
+
+ static void __iomem *clk_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void sirfsoc_secondary_init(unsigned int cpu)
+ {
+@@ -36,8 +36,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static const struct of_device_id clk_ids[] = {
+@@ -75,7 +75,7 @@
+ /* make sure write buffer is drained */
+ mb();
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -107,7 +107,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.4.13.orig/arch/arm/mach-qcom/platsmp.c linux-4.4.13/arch/arm/mach-qcom/platsmp.c
+--- linux-4.4.13.orig/arch/arm/mach-qcom/platsmp.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-qcom/platsmp.c 2016-06-19 16:02:36.692488841 +0200
+@@ -46,7 +46,7 @@
+
+ extern void secondary_startup_arm(void);
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ static void qcom_cpu_die(unsigned int cpu)
+@@ -60,8 +60,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int scss_release_secondary(unsigned int cpu)
+@@ -284,7 +284,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+@@ -297,7 +297,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return ret;
+ }
+diff -Nur linux-4.4.13.orig/arch/arm/mach-spear/platsmp.c linux-4.4.13/arch/arm/mach-spear/platsmp.c
+--- linux-4.4.13.orig/arch/arm/mach-spear/platsmp.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/arch/arm/mach-spear/platsmp.c 2016-06-19 16:02:36.692488841 +0200
+@@ -32,7 +32,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+
+@@ -47,8 +47,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -59,7 +59,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -84,7 +84,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-4.4.13.orig/arch/arm/mach-sti/platsmp.c linux-4.4.13/arch/arm/mach-sti/platsmp.c
+--- linux-4.4.13.orig/arch/arm/